From 7f626735f623ec19b106a2fe8675a6dd40837970 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Fri, 15 Nov 2019 00:49:48 +0000 Subject: [PATCH 01/43] Merged PR 2339: Initial development on QML library. This PR represents joint with with @Alex Bocharov, and begins open development on a quantum machine learning library. --- Build/build.ps1 | 5 +- Build/pack.ps1 | 3 + Chemistry/src/DataModel/DataModel.csproj | 2 +- Chemistry/src/Jupyter/Jupyter.csproj | 4 +- Chemistry/src/Runtime/Runtime.csproj | 2 +- .../tests/ChemistryTests/QSharpTests.csproj | 4 +- .../tests/DataModelTests/CSharpTests.csproj | 4 +- .../tests/SamplesTests/SamplesTests.csproj | 4 +- .../tests/SystemTests/SystemTests.csproj | 4 +- MachineLearning.sln | 72 + MachineLearning/Common/267DevDivSNKey2048.snk | Bin 0 -> 288 bytes MachineLearning/Common/DelaySign.cs | 24 + .../src/DataModel/DataModel.csproj | 47 + MachineLearning/src/DataModel/Interop.cs | 334 +++++ MachineLearning/src/Runtime/Circuits.qs | 1153 +++++++++++++++++ MachineLearning/src/Runtime/Classification.qs | 69 + MachineLearning/src/Runtime/Convert.qs | 63 + MachineLearning/src/Runtime/Deprecated.qs | 84 ++ MachineLearning/src/Runtime/Examples.qs | 96 ++ MachineLearning/src/Runtime/Features.qs | 9 + MachineLearning/src/Runtime/InputEncoding.qs | 112 ++ .../src/Runtime/Properties/NamespaceInfo.qs | 11 + .../src/Runtime/RotationSequences.qs | 51 + MachineLearning/src/Runtime/Runtime.csproj | 15 + .../src/Runtime/SpecialMultiplexor.qs | 252 ++++ MachineLearning/src/Runtime/SpecialSP.qs | 242 ++++ MachineLearning/src/Runtime/Training.qs | 413 ++++++ MachineLearning/src/Runtime/Types.qs | 54 + MachineLearning/src/Runtime/Utils.qs | 12 + MachineLearning/src/Runtime/Validation.qs | 76 ++ .../tests/MachineLearningTests.csproj | 26 + Numerics/src/Numerics.csproj | 2 +- Numerics/tests/NumericsTests.csproj | 4 +- Standard/src/Arithmetic/Reflections.qs | 29 + Standard/src/Arrays/Arrays.qs | 14 + .../src/Canon/Combinators/ApplyToElement.qs | 114 ++ Standard/src/Logical/Predicates.qs | 46 + Standard/src/Math/Functions.qs | 23 + Standard/src/Measurement/Reset.qs | 20 + Standard/src/Standard.csproj | 2 +- Standard/tests/Standard.Tests.csproj | 4 +- 41 files changed, 3486 insertions(+), 19 deletions(-) create mode 100644 MachineLearning.sln create mode 100644 MachineLearning/Common/267DevDivSNKey2048.snk create mode 100644 MachineLearning/Common/DelaySign.cs create mode 100644 MachineLearning/src/DataModel/DataModel.csproj create mode 100644 MachineLearning/src/DataModel/Interop.cs create mode 100644 MachineLearning/src/Runtime/Circuits.qs create mode 100644 MachineLearning/src/Runtime/Classification.qs create mode 100644 MachineLearning/src/Runtime/Convert.qs create mode 100644 MachineLearning/src/Runtime/Deprecated.qs create mode 100644 MachineLearning/src/Runtime/Examples.qs create mode 100644 MachineLearning/src/Runtime/Features.qs create mode 100644 MachineLearning/src/Runtime/InputEncoding.qs create mode 100644 MachineLearning/src/Runtime/Properties/NamespaceInfo.qs create mode 100644 MachineLearning/src/Runtime/RotationSequences.qs create mode 100644 MachineLearning/src/Runtime/Runtime.csproj create mode 100644 MachineLearning/src/Runtime/SpecialMultiplexor.qs create mode 100644 MachineLearning/src/Runtime/SpecialSP.qs create mode 100644 MachineLearning/src/Runtime/Training.qs create mode 100644 MachineLearning/src/Runtime/Types.qs create mode 100644 MachineLearning/src/Runtime/Utils.qs create mode 100644 MachineLearning/src/Runtime/Validation.qs create mode 100644 
MachineLearning/tests/MachineLearningTests.csproj create mode 100644 Standard/src/Arithmetic/Reflections.qs create mode 100644 Standard/src/Canon/Combinators/ApplyToElement.qs diff --git a/Build/build.ps1 b/Build/build.ps1 index 8743ab2f623..4a9f89c5858 100644 --- a/Build/build.ps1 +++ b/Build/build.ps1 @@ -34,7 +34,10 @@ Build-One 'publish' '../Chemistry.sln' Write-Host "##[info]Build Numerics library" Build-One 'publish' '../Numerics.sln' -Write-Host "##[info]Build Standard library" +Write-Host "##[info]Build QML library" +Build-One 'publish' '../MachineLearning.sln' + +Write-Host "##[info]Build Jupyter magic library" Build-One 'publish' '../Magic.sln' if (-not $all_ok) { diff --git a/Build/pack.ps1 b/Build/pack.ps1 index d2742c51184..a69ed719b75 100644 --- a/Build/pack.ps1 +++ b/Build/pack.ps1 @@ -28,6 +28,9 @@ Pack-One '../Standard/src/Standard.csproj' Write-Host "##[info]Pack Chemistry library" Pack-One '../Chemistry/src/DataModel/DataModel.csproj' +Write-Host "##[info]Pack QML library" +Pack-One '../MachineLearning/src/DataModel/DataModel.csproj' + Write-Host "##[info]Pack Numerics library" Pack-One '../Numerics/src/Numerics.csproj' diff --git a/Chemistry/src/DataModel/DataModel.csproj b/Chemistry/src/DataModel/DataModel.csproj index 99fa589a46d..9704eb3c77d 100644 --- a/Chemistry/src/DataModel/DataModel.csproj +++ b/Chemistry/src/DataModel/DataModel.csproj @@ -35,7 +35,7 @@ - + diff --git a/Chemistry/src/Jupyter/Jupyter.csproj b/Chemistry/src/Jupyter/Jupyter.csproj index 8d96c5b301a..a8a05ed2349 100644 --- a/Chemistry/src/Jupyter/Jupyter.csproj +++ b/Chemistry/src/Jupyter/Jupyter.csproj @@ -26,8 +26,8 @@ - - + + diff --git a/Chemistry/src/Runtime/Runtime.csproj b/Chemistry/src/Runtime/Runtime.csproj index 1b9f03c805c..442a13d14c9 100644 --- a/Chemistry/src/Runtime/Runtime.csproj +++ b/Chemistry/src/Runtime/Runtime.csproj @@ -15,7 +15,7 @@ - + diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj index 783e80eaf17..66e2888e2c0 100644 --- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj +++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj @@ -11,8 +11,8 @@ - - + + diff --git a/Chemistry/tests/DataModelTests/CSharpTests.csproj b/Chemistry/tests/DataModelTests/CSharpTests.csproj index 773cbcd10b6..9c3df4a3cce 100644 --- a/Chemistry/tests/DataModelTests/CSharpTests.csproj +++ b/Chemistry/tests/DataModelTests/CSharpTests.csproj @@ -24,8 +24,8 @@ - - + + diff --git a/Chemistry/tests/SamplesTests/SamplesTests.csproj b/Chemistry/tests/SamplesTests/SamplesTests.csproj index 9cb16df0fa5..af9581bbb83 100644 --- a/Chemistry/tests/SamplesTests/SamplesTests.csproj +++ b/Chemistry/tests/SamplesTests/SamplesTests.csproj @@ -18,8 +18,8 @@ - - + + diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj index 19e7741ae35..0da8e8b4d71 100644 --- a/Chemistry/tests/SystemTests/SystemTests.csproj +++ b/Chemistry/tests/SystemTests/SystemTests.csproj @@ -18,8 +18,8 @@ - - + + diff --git a/MachineLearning.sln b/MachineLearning.sln new file mode 100644 index 00000000000..2e787d25cc1 --- /dev/null +++ b/MachineLearning.sln @@ -0,0 +1,72 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 15 +VisualStudioVersion = 15.0.26124.0 +MinimumVisualStudioVersion = 15.0.26124.0 +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "MachineLearning", "MachineLearning", "{A16B06ED-70E8-4494-B040-D446F0F74588}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = 
"src", "src", "{E540364C-047F-446E-B8C1-CD41224E2282}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Runtime", "MachineLearning\src\Runtime\Runtime.csproj", "{4C399D64-0435-47E0-99D3-AA898E640717}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DataModel", "MachineLearning\src\DataModel\DataModel.csproj", "{E4A725A7-3525-4FC5-9794-4317B5DB9C9B}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "MachineLearningTests", "MachineLearning\tests\MachineLearningTests.csproj", "{0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|Any CPU.Build.0 = Debug|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x64.ActiveCfg = Debug|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x64.Build.0 = Debug|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x86.ActiveCfg = Debug|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x86.Build.0 = Debug|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Release|Any CPU.ActiveCfg = Release|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Release|Any CPU.Build.0 = Release|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x64.ActiveCfg = Release|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x64.Build.0 = Release|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x86.ActiveCfg = Release|Any CPU + {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x86.Build.0 = Release|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x64.ActiveCfg = Debug|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x64.Build.0 = Debug|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x86.ActiveCfg = Debug|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x86.Build.0 = Debug|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|Any CPU.Build.0 = Release|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x64.ActiveCfg = Release|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x64.Build.0 = Release|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x86.ActiveCfg = Release|Any CPU + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x86.Build.0 = Release|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x64.ActiveCfg = Debug|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x64.Build.0 = Debug|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x86.ActiveCfg = Debug|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x86.Build.0 = Debug|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|Any CPU.Build.0 = Release|Any CPU + 
{0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x64.ActiveCfg = Release|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x64.Build.0 = Release|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x86.ActiveCfg = Release|Any CPU + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x86.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {E540364C-047F-446E-B8C1-CD41224E2282} = {A16B06ED-70E8-4494-B040-D446F0F74588} + {4C399D64-0435-47E0-99D3-AA898E640717} = {E540364C-047F-446E-B8C1-CD41224E2282} + {E4A725A7-3525-4FC5-9794-4317B5DB9C9B} = {E540364C-047F-446E-B8C1-CD41224E2282} + {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B} = {A16B06ED-70E8-4494-B040-D446F0F74588} + EndGlobalSection +EndGlobal diff --git a/MachineLearning/Common/267DevDivSNKey2048.snk b/MachineLearning/Common/267DevDivSNKey2048.snk new file mode 100644 index 0000000000000000000000000000000000000000..7a1fffb890a43a90273251a13141c1b92d5b2672 GIT binary patch literal 288 zcmV+*0pI=rBme*mfB*m#0RR970ssI2Bme+XQ$aBR2mk;90097DGnudPj`5axxzmR}d}v>#e4lBjR&}2Era&fE7}!jUWnTqBY9YFX4_ zm5IimTdxd3d8Bg@*(FuY0BX#OSy}SEZV=;O!$mit(q`I#g@t8<$X$Wje?Gt-*;Y(g z**$SQ&=)L@BcTrhIk*g#uy;1~*svm0Do%7I8ec;wBr%IMIi;Y7_~kt>6JMO9e><|_ z3jYf5gp~yQeCj{(lH85p?X6_Sj{W0IR?`BlRsC*&^8(fH&Fbk>WsNjaRvTY7#X|4f m-JghsIMZ~_@dm8-**Zi;!^b??5%5 + + + netstandard2.1 + x64 + Microsoft.Quantum.MachineLearning.DataModel + bin\$(BuildConfiguration)\$(PlatformTarget)\$(AssemblyName).xml + + + + True + Microsoft + Microsoft's Quantum ML Libraries. + © Microsoft Corporation. All rights reserved. + Microsoft.Quantum.MachineLearning + See: https://docs.microsoft.com/en-us/quantum/relnotes/ + https://github.com/Microsoft/QuantumLibraries/raw/master/LICENSE.txt + https://github.com/Microsoft/QuantumLibraries/tree/master/Chemistry + https://secure.gravatar.com/avatar/bd1f02955b2853ba0a3b1cdc2434e8ec.png + Quantum Q# Qsharp + 1591 + true + true + true + true + snupkg + $(AllowedOutputExtensionsInPackageBuildOutputFolder);.pdb;.xml + + + + + + + + + + + + + + + + + + + + diff --git a/MachineLearning/src/DataModel/Interop.cs b/MachineLearning/src/DataModel/Interop.cs new file mode 100644 index 00000000000..262b17365b2 --- /dev/null +++ b/MachineLearning/src/DataModel/Interop.cs @@ -0,0 +1,334 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
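+
+// A minimal usage sketch of the ClassificationModel class defined below in this file.
+// Illustrative only: the generic type arguments, the numeric values, and the placeholder
+// variables (parameterStarts, vectors, labels, trainingSchedule, validationSchedule) are
+// assumptions supplied for the example and are not fixed by this file.
+//
+//     var structure = ClassificationModel.JoinLayers(new List<List<long[]>> {
+//         ClassificationModel.LocalRotationsLayer(2, 'Y'),
+//         ClassificationModel.CyclicEntanglerLayer(2, 'X')
+//     });
+//     ClassificationModel.reindex(structure);            // assign parameter indices 0, 1, ...
+//     var model = new ClassificationModel(2, structure);
+//     model.QcccTrainSequential(parameterStarts, vectors, labels, trainingSchedule,
+//         validationSchedule, learningRate: 0.1, tolerance: 0.001, miniBatchSize: 4,
+//         maxEpochs: 16, nMeasurements: 10000, randomizationSeed: 12345);
+//     var misses = model.CountMisclassifications(0.001, vectors, labels, 10000, 12345);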
+ +using Microsoft.Quantum.Simulation.Core; +using Microsoft.Quantum.Simulation.Simulators; +using System; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + + + +/// +/// This code space provides suggested interoperability classes for running the +/// Q# Qccc (quantum circuit-centric classifier) code on Microsoft quantum simulator +/// +namespace Microsoft.Quantum.MachineLearning.Interop +{ + using Microsoft.Quantum.Logical; + using Microsoft.Quantum.MachineLearning; + using System.Runtime.CompilerServices; + using System.Runtime.ExceptionServices; + using System.Runtime.InteropServices.ComTypes; + using System.Xml; + + + /// + /// Quick conversions into IQArray format + /// + public class Qonvert + { + + public static long ToC(char pauli) + { + if (pauli.Equals('I')) + { + return 0L; + } + if (pauli.Equals('X')) + { + return 1L; + } + if (pauli.Equals('Y')) + { + return 2L; + } + if (pauli.Equals('Z')) + { + return 3L; + } + return -1L; + } + + public static IQArray> ToQ(List src) + { + List> tmp = new List>(src.Count); + for (int ix = 0; ix < src.Count; ix++) + { + tmp.Add(new QArray(src[ix])); + } + return new QArray>(tmp.ToArray()); + } + + public static IQArray ToQ(List src) + { + return new QArray(src.ToArray()); + } + + public static IQArray> ToQ(List src) + { + List> tmp = new List>(src.Count); + for (int ix = 0; ix < src.Count; ix++) + { + tmp.Add(new QArray(src[ix])); + } + return new QArray>(tmp.ToArray()); + } + } //Qonvert + + public class ClassificationModel + { + long _nQubits; + IQArray> _structure; + IQArray _cachedParameters; + double _bias; + + public ClassificationModel(long nQubits) + { + _nQubits = nQubits; + _structure = null; + _cachedParameters = null; + _bias = -2.0; + } + + public ClassificationModel(long nQubits, List structure) + { + _nQubits = nQubits; + _structure = Qonvert.ToQ(structure); + _cachedParameters = null; + _bias = -2.0; + } + + public ClassificationModel(long nQubits, List structure,double[] parameters) + { + _nQubits = nQubits; + _structure = Qonvert.ToQ(structure); + _cachedParameters = new QArray(parameters); + _bias = -2.0; + } + + public ClassificationModel(long nQubits, List structure, double[] parameters, double bias) + { + _nQubits = nQubits; + _structure = Qonvert.ToQ(structure); + _cachedParameters = new QArray(parameters); + _bias = bias; + } + + public bool isTrained + { + get { return (_bias > -1.5) && (_structure != null) && (_cachedParameters != null); } + } + + public IQArray> CircuitStructure + { + get { return _structure; } + set { _structure = value; } + } + + public IQArray CachedParameters + { + get { return _cachedParameters; } + } + + public double Bias + { + get { return _bias; } + } + + /// + /// Creates a layer of nQubits Pauli rotations + /// + /// Number of qubits to rotate + /// Type of Pauli gate + /// Sequence of nQubits rotation templates + public static List LocalRotationsLayer(long nQubits, char pauli) + { + List ret = new List((int)nQubits); + for (long iq = 0; iq < nQubits; iq++) + { + long[] localRp = new long[] { -1, Qonvert.ToC(pauli), iq }; + ret.Add(localRp); + } + return ret; + } + + /// + /// Creates a layer of nQubits Pauli rotations + /// + /// Number of qubits to rotate + /// Type of Pauli gate + /// Sequence of nQubits rotation templates + public static List PartialLocalLayer(long[] indices, char pauli) + { + List ret = new List(indices.Length); + foreach (long iq in indices) + { + 
long[] localRp = new long[] { -1, Qonvert.ToC(pauli), iq }; + ret.Add(localRp); + } + return ret; + } + + /// + /// Creates a cyclic block of nQubits controlled rotations that starts + /// with contol qubit (nQubits-1), target qubit (cspan-1) % n , followed by the + /// ladder of entanglers with control qubit iq and target qubit (iq+cspan) % n + /// + /// Number of qubits to entangle + /// + /// index offset between control and target qubits + /// + public static List CyclicEntanglerLayer(long nQubits, char pauli, long cspan) + { + List ret = new List((int)nQubits); + ret.Add(new long[] { -1, Qonvert.ToC(pauli), (long) ((cspan-1) % nQubits), nQubits - 1 }); + for (long iq = 1; iq < nQubits; iq++) + { + long[] entRp = new long[] { -1, Qonvert.ToC(pauli), (long)(((iq + 1) * cspan - 1) % nQubits), (long)((iq*cspan - 1) % nQubits) }; + ret.Add(entRp); + } + return ret; + } + + public static List CyclicEntanglerLayer(long nQubits, char pauli) + { + return CyclicEntanglerLayer(nQubits, pauli, 1L); + } + + public static List JoinLayers(List> layers) + { + List structure = new List(layers.Count * layers[0].Count); + for (int ila = 0; ila < layers.Count; ila++) + { + structure.AddRange(layers[ila]); + } + return structure; + } + + public static void reindex(List struc) + { + for (int ix=0; ix < struc.Count; ix++) + { + long[] gt = struc[ix]; + gt[0] = ix; + struc[ix] = gt; + } + } + + public void QcccTrainSequential(IQArray> parameterSource, IQArray> trainingSet, IQArray trainingLabels, IQArray> trainingSchedule, + IQArray> validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) + { + var sim = new QuantumSimulator(false, randomizationSeed); + (this._cachedParameters, this._bias) = + TrainQcccSequential.Run(sim, this._nQubits, this._structure, parameterSource, trainingSet, trainingLabels, trainingSchedule, validationSchedule, learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements).Result; + } + public void QcccTrainSequential(List parameterSource, List trainingSet, List trainingLabels, List trainingSchedule, + List validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) + { + QcccTrainSequential(Qonvert.ToQ(parameterSource), Qonvert.ToQ(trainingSet), Qonvert.ToQ(trainingLabels), Qonvert.ToQ(trainingSchedule), + Qonvert.ToQ(validationSchedule), learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements, randomizationSeed); + } + + public void QcccTrainParallel(IQArray> parameterSource, IQArray> trainingSet, IQArray trainingLabels, IQArray> trainingSchedule, + IQArray> validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) + { + var simAll = new List(parameterSource.Count); + var resultsAll = new List<(IQArray, double)>(parameterSource.Count); + var parameterComb = new List>>(parameterSource.Count); + + var indices = new int[parameterSource.Count]; + for (int j = 0; j < parameterSource.Count; j++) + { + indices[j] = j; + simAll.Add(new QuantumSimulator(false, randomizationSeed)); + resultsAll.Add((new QArray(),0.0)); + parameterComb.Add(new QArray>(new IQArray[] { parameterSource[j] })); //Isolating parameter starts - one per thread + } + Parallel.ForEach(indices, + (j) => + { + + var rslt = + TrainQcccSequential.Run(simAll[j], this._nQubits, this._structure, parameterComb[j], trainingSet, trainingLabels, trainingSchedule, 
validationSchedule, learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements).Result; + resultsAll[j] = rslt; + } + ); + //Estimated parameters and biases for each proposed parameter start. Now postprocess + long bestValidation = long.MaxValue; + int bestJ = -1; + var sim = new QuantumSimulator(false, randomizationSeed); + for (int j = 0; j < parameterSource.Count; j++) + { + var (pars, bias) = resultsAll[j]; + long misses = CountValidationMisses.Run(sim, tolerance, this._nQubits, trainingSet, trainingLabels, validationSchedule, this._structure, pars, bias, nMeasurements).Result; + if (bestValidation > misses) + { + bestValidation = misses; + bestJ = j; + } + } + (this._cachedParameters, this._bias) = resultsAll[bestJ]; + } //QcccTrainParallel + + public void QcccTrainParallel(List parameterSource, List trainingSet, List trainingLabels, List trainingSchedule, + List validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) + { + QcccTrainParallel(Qonvert.ToQ(parameterSource), Qonvert.ToQ(trainingSet), Qonvert.ToQ(trainingLabels), Qonvert.ToQ(trainingSchedule), + Qonvert.ToQ(validationSchedule), learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements, randomizationSeed); + } + + public long CountMisclassifications(double tolerance, IQArray> samples, IQArray knownLabels, IQArray> validationSchedule, long nMeasurements, uint randomizationSeed) + { + if (this.isTrained) + { + var sim = new QuantumSimulator(false, randomizationSeed); + return CountValidationMisses.Run(sim, tolerance, this._nQubits, samples, knownLabels, validationSchedule, this._structure, this.CachedParameters, this.Bias, nMeasurements).Result; + } + return long.MaxValue; + } + + public long CountMisclassifications(double tolerance, List samples, List knownLabels, List validationSchedule, long nMeasurements, uint randomizationSeed) + { + return CountMisclassifications(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(knownLabels), Qonvert.ToQ(validationSchedule), nMeasurements, randomizationSeed); + } + + public long CountMisclassifications(double tolerance, List samples, List knownLabels, long nMeasurements, uint randomizationSeed) + { + var validationSchedule = new List(1); + validationSchedule.Add(new long[] { 0L, 1L, ((long)(samples.Count - 1)) }); + return CountMisclassifications(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(knownLabels), Qonvert.ToQ(validationSchedule), nMeasurements, randomizationSeed); + } + + //EstimateClassificationProbabilitiesClassicalDataAdapter(samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] + public double[] EstimateClassificationProbabilities(double tolerance, IQArray> samples, IQArray> schedule, long nMeasurements, uint randomizationSeed) + { + if (this.isTrained) + { + var sim = new QuantumSimulator(false, randomizationSeed); + IQArray probs = EstimateClassificationProbabilitiesClassicalDataAdapter.Run(sim, tolerance, samples, schedule, this._nQubits, this._structure, this.CachedParameters, nMeasurements).Result; + return probs.ToArray(); + } + return new double[] { -1.0 }; + } + + public double[] EstimateClassificationProbabilities(double tolerance, List samples, List schedule, long nMeasurements, uint randomizationSeed) + { + return EstimateClassificationProbabilities(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(schedule), nMeasurements, randomizationSeed); + } + + public double[] EstimateClassificationProbabilities(double tolerance, 
List samples, long nMeasurements, uint randomizationSeed) + { + List sched = new List(1); + sched.Add(new long[] { 0L, 1L, (long)(samples.Count - 1) }); + return EstimateClassificationProbabilities(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(sched), nMeasurements, randomizationSeed); + } + + + } //class ClassificationModel + +} diff --git a/MachineLearning/src/Runtime/Circuits.qs b/MachineLearning/src/Runtime/Circuits.qs new file mode 100644 index 00000000000..77661be4a8d --- /dev/null +++ b/MachineLearning/src/Runtime/Circuits.qs @@ -0,0 +1,1153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Characterization; + + /// WARNING: the downstream EstimateFrequencyA counts the frequency of Zero + + operation measureLastQubit(nQubits : Int): (Qubit[] => Result) { + let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; + return Measure(paulis, _); + } + + operation _endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, reg: Qubit[]): Unit is Adj + { + enc(LittleEndian(reg)); + _ApplyGates(parameters, gates, reg); + } + + operation endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence) : (Qubit[] => Unit is Adj) + { + return _endToEndPreparation(enc,parameters, gates, _); + } + + function collectNegativeLocs(cNegative: Int, coefficients : ComplexPolar[]) : Int[] + { + mutable negLocs = ConstantArray(cNegative, -1); + mutable nlx = 0; + for (idx in 0 .. Length(coefficients) - 1) + { + let (r,a) = (coefficients[idx])!; + if (AbsD(a - PI()) < 1E-9) { + if (nlx < cNegative) + { + set negLocs w/= nlx <- idx; + set nlx = nlx+1; + } + } + } + return negLocs; + } //collectNegativeLocs + + // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. 
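+    // A sketch of the algebra behind the circuit below (assuming an ideal encoder `enc`
+    // preparing |ψ⟩, and recalling from the WARNING above that EstimateFrequencyA reports
+    // the frequency of Zero): writing U = circuit(param1, gates1) for the branch applied
+    // when the auxiliary qubit is |1⟩, and Z·V with V = circuit(param2, gates2) for the
+    // |0⟩ branch (Z acting on the last data qubit), the auxiliary qubit is measured as One
+    // with probability
+    //
+    //     Pr(One) = (1 - Re⟨ψ| V† Z U |ψ⟩) / 2,
+    //
+    // so that 2 · HardamardTestPhysical(...) - 1 estimates -Re⟨ψ| V† Z U |ψ⟩, the quantity
+    // that EstimateGradient below folds into each gradient component.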
+ operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { + let L = Length(reg) - 1; + let g1 = _ApplyGates(param1,gates1,_); + let g2 = _ApplyGates(param2,gates2,_); + + enc(LittleEndian(reg[0..(L-1)])); + within { + H(Tail(reg)); + } apply { + (Controlled g1) ([reg[L]], reg[0..(L-1)]); + within { + X(Tail(reg)); + } apply { + (Controlled g2) ([reg[L]], reg[0..(L-1)]); + (Controlled Z) ([reg[L]], reg[(L-1)]); + } + } + } + + operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { + return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); + } + + operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double + { + return 1.0-EstimateFrequencyA(endToEndHTcircuit(enc2,param1,gates1,param2,gates2),measureLastQubit(nQubits), nQubits, nMeasurements); + } + + operation QubitProbPhysical(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, nQubits: Int, nMeasurements : Int) + : Double { + return 1.0 - EstimateFrequencyA( + endToEndPreparation(enc,parameters,gates), + measureLastQubit(nQubits), + nQubits, + nMeasurements + ); + } + + operation CircuitResultClassical(tolerance: Double, parameters : Double[], gates: GateSequence, sample: Double[], nMeasurements: Int) : Double + { + let dL = IntAsDouble (Length(sample)); + let N = Microsoft.Quantum.Math.Ceiling(Lg(dL)); + let circEnc = NoisyInputEncoder(tolerance/IntAsDouble(Length(gates!)),sample); + let rslt = QubitProbPhysical(circEnc, parameters,gates, N, nMeasurements); + return rslt; + + } + + + /// # Summary + /// Classify one sample; the label part of the container is ignored + /// + /// # Input + /// ## measCount + /// the number of measurements used + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// ## bias + /// postselection bias of the model + /// + /// # Output + /// post-selected class label + /// + operation ClassifyOneSimulated(tolerance: Double, sample: LabeledSample, parameters : Double[], gates: GateSequence, bias: Double, nMeasurements: Int): Int + { + let dL = IntAsDouble (Length(getData(sample))); + mutable N = Microsoft.Quantum.Math.Ceiling(Lg(dL)); + let qsp = qubitSpan(gates); + if (N < qsp) + { + set N = qsp; + } + let circEnc = NoisyInputEncoder(tolerance/IntAsDouble(Length(gates!)), getData(sample)); + return bias + QubitProbPhysical(circEnc, parameters, gates, N, nMeasurements) > 0.5 ? 
1 | 0; + } + + + /// # Summary + /// Quantum-lawful estimation of postselection probability of |1> + /// + /// # Input + /// ## measCount + /// the number of measurements used + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// # Output + /// the probability estimate + /// + operation CircuitResult (measCount: Int, sg: StateGenerator, parameters : Double[], gates: GateSequence) : Double { + + mutable countOne = 0.0; + mutable qCount = qubitSpan(gates); + if (qCount < Fst(sg!)) + { + set qCount = Fst(sg!); + } + let measIdx = qCount - 1; + let circEnc = Snd(sg!); + for (ep in 1..measCount) + { + using (qubits = Qubit[qCount]) + { + //let circEnc = InputEncoder(coefficients); //usage insights + //let qubitsBE = LittleEndian(qubits); + circEnc(LittleEndian(qubits)); + _ApplyGates(parameters, gates, qubits); + //dumpRegisterToConsole(qubits); + + let rslt = M(qubits[measIdx]); + if (rslt == One) + { + set countOne = countOne + 1.0; + } + + for(i in 0..qCount-1) + { + Set(Zero, qubits[i]); + } + } + } + + // Return number of times we saw a |1> + return countOne/IntAsDouble (measCount); + + } + + /// # Summary + /// Classify one sample represented as a state generator + /// + /// # Input + /// ## measCount + /// the number of measurements used + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// ## bias + /// postselection bias of the model + /// + /// # Output + /// post-selected class label + /// + operation ClassifyOne (measCount: Int, sg: StateGenerator, parameters : Double[], gates: GateSequence, bias: Double) : (Int) + { + return CircuitResult(measCount,sg,parameters,gates)+bias > 0.5 ? 1 | 0; + } + + + /// # Summary + /// polymorphic classical/quantum gradient estimator + /// + /// # Input + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. + /// IMPORTANT: measCount==0 implies simulator deployment + /// + /// # Output + /// the gradient + /// + operation EstimateGradient(param : Double[], gates: GateSequence, sg: StateGenerator, nMeasurements : Int) : (Double[]) { + //Synopsis: Suppose (param,gates) define Circ0 + //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 + //The expectation derivative is then 2 Re[] = + // Re[] - Re[] + //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 + //Thus we are left to compute Re[] = + // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> + //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) + + + //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) + //and we want a unitary description of its \theta-derivative. 
It can be written as + // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} + let pC = Length(param); + mutable grad = ConstantArray(pC, 0.0); + mutable paramShift = param + [0.0]; + // let sqNorm0 = CircuitResultHack(param, gates, register); + let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); + + for (gate in gates!) { + set paramShift w/= gate::Index <- (param[gate::Index] + PI()); //Shift the corresponding parameter + // NB: This the *antiderivative* of the bracket + let newDer = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements + ) - 1.0; + if (IsEmpty(gate::Span::ControlIndices)) { + //uncontrolled gate + set grad w/= gate::Index <- grad[gate::Index] + newDer; + } else { + //controlled gate + set paramShift w/=gate::Index<-(param[gate::Index]+3.0 * PI()); + //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) + // NB: This the *antiderivative* of the bracket + let newDer1 = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, paramShift, gates, nQubits + 1, + nMeasurements + ) - 1.0; + set grad w/= gate::Index <- (grad[gate::Index] + 0.5* (newDer - newDer1)); + set paramShift w/= gate::Index <-( param[gate::Index] + PI()); //unshift by 2 Pi (for debugging purposes) + } + set paramShift w/= gate::Index <- param[gate::Index]; //unshift this parameter + } + return grad; + + } //GradientHack + + + /// # Summary + /// computes stochastic gradient on one classical sample + /// + /// # Input + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## sample + /// sample vector as a raw array + /// + /// ## nMeasurements + /// number of true quantum measurements to estimate probabilities + /// + /// # Output + /// the gradient + /// + operation EstimateGradientFromClassicalSample(tolerance: Double, param : Double[], gates: GateSequence, sample: Double[], nMeasurements : Int) : (Double[]) { + let nQubits = MaxI(FeatureRegisterSize(sample), NQubitsRequired(gates)); + let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let sg = StateGenerator(nQubits, circEnc); + return EstimateGradient(param, gates, sg, nMeasurements); + } + + //Csharp-frendly adapter for gradient estimation + //'gates' is a array of "flattened" controlled rotation defitions + //each such definition is Int[no.controls+3] in the format [parameter index, Pauli index, target index <,control qubit indices>] + //Pauli index is: 0 for I, 1 for X, 2 for y, 3 for Z + //target index is the index of the target qubit of the rotation + //Sequence of can be empty for uncontroled + operation GradientClassicalSimulationAdapter(tolerance: Double, param : Double[], gates: Int[][], sample: Double[]) : (Double[]) + { + + return EstimateGradientFromClassicalSample(tolerance, param,unFlattenGateSequence(gates),sample,0); + + } + + /// # Summary + /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. 
THIS operation is IN DEPRECATION + /// + /// # Input + /// ## samples + /// a container of labeled samples + /// + /// ## sched + /// a schedule to define a subset of samples + /// + /// ## param + /// parameters of the circuits + /// + /// ## gates + /// the sequence of gates in the circuit + /// + /// ## measCount + /// the maximum number of quantum measurements used in the probability estimation + /// IMPORTANT: measCount==0 implies deployment to simulator + /// + /// # Output + /// (no.hits, no.misses) pair + /// + operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, measCount: Int): + (Double,Int)[] + { + mutable ret = [(0.0,0)]; + mutable sC = 0; + for (rg in sched!) + { + for (ix in rg) + { + set sC += 1; + } + } + mutable N = qubitSpan(gates); + if (Length(samples)>0) + { + let dL =Microsoft.Quantum.Math.Ceiling(Lg(IntAsDouble (Length(getData(Head(samples)))))); + if (N < dL) + { + set N = dL; + } + } + set ret = new (Double,Int)[sC]; + mutable ir = 0; + for (rg in sched!) { + for (ix in rg) { + let samp = samples[ix]; + //agnostic w.r.t. simulator (may still be simulable) + let prob1 = CircuitResultClassical(1E-12,param, gates, getData(samp),measCount); + set ret w/= ir <- (prob1, getLabel(samp)); + set ir += 1; + } + } + + return ret; + } + + + + /// # Summary + /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. THIS operation is IN DEPRECATION + /// + /// # Input + /// ## samples + /// a container of labeled samples + /// + /// ## sched + /// a schedule to define a subset of samples + /// + /// ## nQubits + /// number of cubits in the classification circuit + /// + /// ## gates + /// the sequence of gates in the circuit + /// + /// ## param + /// parameters of the circuits + /// + /// ## measCount + /// + /// # Output + /// array of corresponding estimated probabilities of the top class label + /// + operation EstimateClassificationProbabilitiesClassicalData( + tolerance : Double, samples : Double[][], sched : SamplingSchedule, + nQubits : Int, gates : GateSequence, param : Double[], + nMeasurements : Int + ) : Double[] { + let effectiveTolerance = tolerance / IntAsDouble(Length(gates!)); + mutable ret = new Double[0]; + for (rg in sched!) { + for (ix in rg) { + let samp = samples[ix]; + let circEnc = NoisyInputEncoder(effectiveTolerance, samp); + set ret += [QubitProbPhysical(circEnc, param, gates, nQubits, nMeasurements)]; + } + } + + return ret; + } //EstimateClassificationProbabilitiesClassicalData + + + operation EstimateClassificationProbabilitiesClassicalDataAdapter(tolerance: Double, samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] + { + return EstimateClassificationProbabilitiesClassicalData(tolerance, samples, unFlattenSchedule(schedule), nQubits, unFlattenGateSequence(gates), param, measCount); + } + + operation PrepareUniformSuperpositionLE(reg : LittleEndian) : Unit is Adj + Ctl { + ApplyToEachCA(H, reg!); + } + + /// # Summary + /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. 
+ /// + /// # Input + /// ## samples + /// a container of labeled samples + /// + /// ## sched + /// a schedule to define a subset of samples + /// + /// ## param + /// parameters of the circuits + /// + /// ## gates + /// the sequence of gates in the circuit + /// + /// ## measCount + /// the maximum number of quantum measurements used in the probability estimation + /// IMPORTANT: measCount==0 implies deployment to simulator + /// + /// # Output + /// List if triplets of the form (sample index, sample probaility, sample label) + /// + operation ClassificationTripletsClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, measCount: Int): + (Int, Double, Int)[] + { + mutable ret = [(-1,0.0,0)]; + mutable sC = 0; + for (rg in sched!) + { + for (ix in rg) + { + set sC = sC +1; + } + } + mutable N = qubitSpan(gates); + if (not IsEmpty(samples)) { + let dL =Microsoft.Quantum.Math.Ceiling(Lg(IntAsDouble (Length(getData(Head(samples)))))); + if (N < dL) + { + set N = dL; + } + } + set ret = new (Int,Double,Int)[sC]; + mutable ir = 0; + for (rg in sched!) + { + for (ix in rg) + { + let samp = samples[ix]; + let data = getData(samp); + let circEnc = InputEncoder(data); + let sg = StateGenerator((N,circEnc)); + let prob1 = CircuitResult(measCount, sg, param, gates); + set ret w/=ir<-(ix,prob1,getLabel(samp)); + set ir = ir+1; + } + } + + return ret; + } + + /// # Summary + /// tallies hits and misses off a list of probability estimates + /// + /// # Input + /// ## pls + /// a list of estimated probabilities with the corresponding class labels + /// + /// ## bias + /// bias on record + /// + /// # Output + /// (no.hits, no.misses) pair + /// + function TallyHitsMisses(pls: (Double, Int)[], bias: Double) : (Int, Int) { + mutable hits = 0; + mutable misses = 0; + for (pl in pls) + { + if (Fst(pl)+bias>0.5) + { + if (Snd(pl)<1) + { + //Misclassification + set misses=misses+1; + } + else + { + set hits=hits+1; + } + } + else + { + if (Snd(pl)>0) + { + //Misclassification + set misses=misses+1; + } + else + { + set hits=hits+1; + } + } + } + return (hits,misses); + } + + /// # Summary + /// generate a flat list of sample indices where mispredictions occur + /// + /// # Input + /// ## sched + /// a sampling schedule + /// + /// ## pls + /// a list of estimated probabilities with the corresponding class labels + /// + /// ## bias + /// bias on record + /// + /// # Output + /// the list of indices where mispredictions occur + /// + function MissLocations(sched : SamplingSchedule, pls : (Double, Int)[], bias: Double) : Int[] { + mutable ret = new Int[0]; + mutable ir = 0; + + for (rg in sched!) 
{ + for (ix in rg) { + let (prob1, lab) = pls[ir]; + set ir += 1; + if (prob1 + bias > 0.5) { + if (lab < 1) { + set ret += [ix]; + } + } else { + if (lab > 0) { + set ret += [ix]; + } + } + } + } + return ret; + } + + /// # Summary + /// C#-friendly adapter to misclassification tally + /// + /// # Input + /// ## vectors + /// data vectors in flat encoding + /// + /// ## labels + /// array of corresponding class lables + /// + /// ## schedule + /// flat representation of index subset on which the circuit is scored + /// + /// ## param + /// circuit parameters + /// + /// ## gateStructure + /// gate structure in flat representation + /// + /// ## bias + /// prediction bias to be tested + /// + /// ## measCount + /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) + /// + /// # Output + /// the number of misclassifications + /// + operation MisclassificationScoreAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int { + mutable misses = 0; + let samples = unFlattenLabeledSamples(vectors,labels); + let gates = unFlattenGateSequence(gateStructure); + let sched = unFlattenSchedule(schedule); + + let pls = ClassificationProbabilitiesClassicalData(samples,sched,param,gates,measCount); + let biasCurrent = adjustBias(pls, bias, 0.01, 10); + let (h1,m1) = TallyHitsMisses(pls,biasCurrent); + return m1; + } + + + /// # Summary + /// C#-friendly adapter to misclassification tally + /// + /// # Input + /// ## vectors + /// data vectors in flat encoding + /// + /// ## labels + /// array of corresponding class lables + /// + /// ## schedule + /// flat representation of index subset on which the circuit is scored + /// + /// ## param + /// circuit parameters + /// + /// ## gateStructure + /// gate structure in flat representation + /// + /// ## bias + /// prediction bias to be tested + /// + /// ## measCount + /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) + /// + /// # Output + /// schedule of indices of misclassified samples + /// + operation MisclassificationsAsScheduleAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int[][] + { + mutable misses = new Int[][0]; + let samples = unFlattenLabeledSamples(vectors,labels); + let gates = unFlattenGateSequence(gateStructure); + let sched = unFlattenSchedule(schedule); + + let pls = ClassificationTripletsClassicalData(samples,sched,param,gates,measCount); + mutable tmp = new (Double,Int)[Length(pls)]; + for (it in 0..(Length(tmp)-1)) + { + let (a,b,c) = pls[it]; + set tmp w/=it<-(b,c); + } + let biasCurrent = adjustBias(tmp, bias, 0.01, 10); + for (pl in pls) + { + let (ix,pp,lb) = pl; + if (pp+biasCurrent>0.5) + { + if (lb <1) + { + //Misclassification + set misses=misses + [[ix,1,ix]]; + } + } + else + { + if (lb>0) + { + //Misclassification + set misses=misses + [[ix,1,ix]]; + } + } + } + return misses; + } + + + /// # Summary + /// C#-friendly adapter to misclassification tally + /// + /// # Input + /// ## vectors + /// data vectors in flat encoding + /// + /// ## labels + /// array of corresponding class lables + /// + /// ## schedule + /// flat representation of index subset on which the circuit is scored + /// + /// ## param + /// circuit parameters + /// + /// ## gateStructure + /// gate structure in flat representation + /// + /// ## bias + /// prediction bias to be tested + /// + 
/// ## measCount + /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) + /// + /// # Output + /// schedule of indices of misclassified samples + /// + operation TestMisclassificationsAsScheduleAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int[][] + { + mutable misses = new Int[][0]; + let samples = unFlattenLabeledSamples(vectors,labels); + let gates = unFlattenGateSequence(gateStructure); + let sched = unFlattenSchedule(schedule); + + let pls = ClassificationTripletsClassicalData(samples,sched,param,gates,measCount); + mutable tmp = new (Double,Int)[Length(pls)]; + for (it in 0..(Length(tmp)-1)) + { + let (a,b,c) = pls[it]; + set tmp w/=it<-(b,c); + } + let biasCurrent = bias; + for (pl in pls) + { + let (ix,pp,lb) = pl; + if (pp+biasCurrent>0.5) + { + if (lb <1) + { + //Misclassification + set misses=misses + [[ix,1,ix]]; + } + } + else + { + if (lb>0) + { + //Misclassification + set misses=misses + [[ix,1,ix]]; + } + } + } + return misses; + } + + + + /// # Summary + /// C#-friendly adapter to misclassification tally + /// + /// # Input + /// ## vectors + /// data vectors in flat encoding + /// + /// ## labels + /// array of corresponding class lables + /// + /// ## schedule + /// flat representation of index subset on which the circuit is scored + /// + /// ## param + /// circuit parameters + /// + /// ## gateStructure + /// gate structure in flat representation + /// + /// ## bias + /// prediction bias to be tested + /// + /// ## measCount + /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) + /// + /// # Output + /// (bias, schedule of indices of misclassified samples) + /// + operation BiasAndMisclassificationsAsScheduleAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : (Double,Int[][]) + { + mutable misses = new Int[][0]; + let samples = unFlattenLabeledSamples(vectors,labels); + let gates = unFlattenGateSequence(gateStructure); + let sched = unFlattenSchedule(schedule); + + let pls = ClassificationTripletsClassicalData(samples,sched,param,gates,measCount); + mutable tmp = new (Double,Int)[Length(pls)]; + for (it in 0..(Length(tmp)-1)) + { + let (a,b,c) = pls[it]; + set tmp w/=it<-(b,c); + } + let biasCurrent = adjustBias(tmp, bias, 0.01, 10); + for (pl in pls) + { + let (ix,pp,lb) = pl; + if (pp+biasCurrent>0.5) + { + if (lb <1) + { + //Misclassification + set misses=misses + [[ix,1,ix]]; + } + } + else + { + if (lb>0) + { + //Misclassification + set misses=misses + [[ix,1,ix]]; + } + } + } + return (biasCurrent, misses); + } + + + /// # Summary + /// Semi-greedily find a bias value that leads to near-minimum misclassification score + /// + operation recomputeBias(probabilities: Double[], labels: Int[], sched: SamplingSchedule, bias: Double, tolerance: Double, maxIter: Int) : Double + { + mutable min1 = 1.0; + mutable max0 = 0.0; + mutable ipro = 0; + for (rg in sched!) 
+ { + for(ix in rg) + { + let prob = probabilities[ipro]; + let lab = labels[ix]; + if (lab > 0) + { + if (min1 > prob) + { + set min1 = prob; + } + } + else + { + if (max0 < prob) + { + set max0 = prob; + } + } + set ipro = ipro +1 ; + } + } //rof + if (max0 <= min1) + { + return 0.5*(1.0-max0-min1); //Gives a perfect classification + } + mutable mBest = Length(probabilities); + mutable bBest = bias; + mutable bLeft = 0.5-max0; + mutable bRight = 0.5-min1; + mutable bestDir = 0; + mutable proposedLabels = InferredLabels(probabilities,bLeft); + mutable mLeft = NMismatches(proposedLabels, labels, sched); + if (mLeft < mBest) + { + set bBest = bLeft; + set mBest = mLeft; + set bestDir = -1; + } + set proposedLabels = InferredLabels(probabilities,bRight); + mutable mRight = NMismatches(proposedLabels, labels, sched); + if (mRight < mBest) + { + set bBest = bRight; + set mBest = mRight; + set bestDir = 1; + } + + for (iter in 1..maxIter) + { + if ((bRight - bLeft) < tolerance) + { + return bBest; + } + let bMiddle = 0.5*(bLeft+bRight); + set proposedLabels = InferredLabels(probabilities,bMiddle); + let mMiddle = NMismatches(proposedLabels, labels, sched); + + if (mMiddle < mLeft) + { + if (bestDir > 0) //replace the weaker end + { + set bLeft = bMiddle; + set mLeft = mMiddle; + + if (mMiddle < mBest) + { + set bBest = bMiddle; + set mBest = mMiddle; + set bestDir = -1; //note that the left end is now better + } + } + else //right end was the weaker end + { + set bRight = bMiddle; + set mRight = mMiddle; + if (mMiddle < mBest) + { + set bBest = bMiddle; + set mBest = mMiddle; + set bestDir = 1; //note that the right end is now better + } + } + //Done with the left end + } + else + { + + if (mMiddle < mRight) + { + //We are better than the right but worse than the left + //Hence the right must be weaker + set bRight = bMiddle; + set mRight = mMiddle; + } + else + { + return bBest; //cannot continue the greedy search + } + } + + } + return bias; + } //recomputeBias + + /// # Summary + /// Semi-greedily find a bias value that leads to near-minimum misclassification score + /// + /// # Input + /// ## pls + /// a plist of probability estimates and corresponding labels + /// + /// ## bias + /// a fallback value of bias + /// + /// ## tol + /// acceptable tolerance in the bias estimate + /// + /// ## maxIter + /// maximum number of trial bisections + /// + /// # Output + /// the bias estimate + /// + function adjustBias(pls: (Double,Int)[], bias: Double, tol:Double, maxIter: Int) : Double + { + mutable min1 = 1.0; + mutable max0 = 0.0; + for (pl in pls) + { + if (Snd(pl)>0) + { + if (min1 > Fst(pl)) + { + set min1 = Fst(pl); + } + } + else + { + if (max0 < Fst(pl)) + { + set max0 = Fst(pl); + } + } + } + if (max0 <= min1) + { + return 0.5*(1.0-max0-min1); //Gives a perfect classification + } + mutable hBest = 0; + mutable mBest = Length(pls); + mutable bBest = bias; + mutable bLeft = 0.5-max0; + mutable bRight = 0.5-min1; + mutable bestDir = 0; + mutable (hLeft,mLeft) = TallyHitsMisses(pls,bLeft); + if (mLeft < mBest) + { + set bBest = bLeft; + set hBest = hLeft; + set mBest = mLeft; + set bestDir = -1; + } + mutable (hRight, mRight) = TallyHitsMisses(pls,bRight); + + if (mRight < mBest) + { + set bBest = bRight; + set hBest = hRight; + set mBest = mRight; + set bestDir = 1; + } + for (iter in 1..maxIter) + { + if ((bRight - bLeft) 0) //replace the weaker end + { + set bLeft = bMiddle; + set hLeft = hMiddle; + set mLeft = mMiddle; + + if (mMiddle * hBest < hMiddle * mBest) + { + set bBest = bMiddle; + 
set hBest = hMiddle; + set mBest = mMiddle; + set bestDir = -1; //note that the left end is now better + } + } + else //right end was the weaker end + { + set bRight = bMiddle; + set hRight = hMiddle; + set mRight = mMiddle; + if (mMiddle * hBest < hMiddle * mBest) + { + set bBest = bMiddle; + set hBest = hMiddle; + set mBest = mMiddle; + set bestDir = 1; //note that the right end is now better + } + } + //Done with the left end + } + else + { + if (mMiddle < mRight) + { + //We are better than the right but worse than the left + //Hence the right must be weaker + set bRight = bMiddle; + set hRight = hMiddle; + set mRight = mMiddle; + } + else + { + return bBest; //cannot continue the greedy search + } + } + } //rof iter + return bBest; + } //adjust bias + + /// # Summary + /// Extract a mini batch of samples and wrap the batch as a LabeledSampleContainer + /// + /// # Input + /// ## size + /// desired number of samples in the mini batch + /// + /// ## ixLoc + /// starting index for the batch in the list of locations + /// + /// ## locations + /// list of indices of samples of interest + /// + /// ## samples + /// the container to extract the samples from + /// + /// # Output + /// the mini batched wrapped as a LabeledSampleContainer + /// + /// # Remarks + /// the resulting mini batch can be occasionally shorter than the requested 'size' + /// (when it falls on the tail end of the list of 'locations') + /// + function ExtractMiniBatch(size: Int, ixLoc: Int, locations: Int[], samples: LabeledSample[]): LabeledSample[] { + mutable cnt = Length(locations)-ixLoc; + if (cnt > size) + { + set cnt = size; + } + mutable rgSamples = new LabeledSample[0]; + if (cnt > 0) + { + set rgSamples = new LabeledSample[cnt]; + for (isa in 0..(cnt-1)) + { + set rgSamples w/=isa<- samples[locations[ixLoc+isa]]; + } + } + return rgSamples; + } + + /// # Summary + /// (Randomly) inflate of deflate the source number + operation randomize(src : Double, relativeFuzz : Double) : Double { + return src * ( + 1.0 + relativeFuzz * (Random([0.5, 0.5]) > 0 ? 1.0 | -1.0) + ); + } + + + + /// Summary + /// One possible C#-friendly wrap around the StochasticTrainingLoop + /// + operation StochasticTrainingLoopPlainAdapter(vectors: Double[][], labels: Int[], sched: Int[][], schedScore: Int[][], periodScore: Int, + miniBatchSize: Int, param: Double[],gates: Int[][], bias: Double, lrate: Double, maxEpochs: Int, tol: Double, measCount: Int ) : Double[] // + { + let samples = unFlattenLabeledSamples(vectors,labels); + let sch = unFlattenSchedule(sched); + let schScore = unFlattenSchedule(sched); + let gts = unFlattenGateSequence(gates); + let ((h,m),(b,parpar)) = StochasticTrainingLoop(samples, sch, schScore, periodScore, + miniBatchSize, param, gts, bias, lrate, maxEpochs, tol, measCount); + mutable ret = new Double[Length(parpar)+3]; + set ret w/=0<-IntAsDouble (h); + set ret w/=1<-IntAsDouble (m); + set ret w/=2<-b; + for (j in 0..(Length(parpar)-1)) + { + set ret w/=(j+3)<-parpar[j]; + } + return ret; + } + + function InferredLabels(probabilities: Double[], bias: Double): Int[] { + mutable ret = new Int[Length(probabilities)]; + for (il in 0..(Length(probabilities) - 1)) { + set ret w/= il <- probabilities[il] + bias > 0.5 ? 
1 | 0;
+        }
+        return ret;
+    }
+
+}
diff --git a/MachineLearning/src/Runtime/Classification.qs b/MachineLearning/src/Runtime/Classification.qs
new file mode 100644
index 00000000000..b254a7fc1d4
--- /dev/null
+++ b/MachineLearning/src/Runtime/Classification.qs
@@ -0,0 +1,69 @@
+namespace Microsoft.Quantum.MachineLearning {
+    open Microsoft.Quantum.Intrinsic;
+    open Microsoft.Quantum.Canon;
+
+
+    /// # Summary
+    /// Using a flat description of a classification model, assign the estimated probability of the top class label
+    /// to each vector in the test set.
+    ///
+    /// # Input
+    /// ## nQubits
+    /// the number of qubits used for data encoding
+    ///
+    /// ## gates
+    /// flat characterization of circuit structure. Each element is [parameterIndex, pauliCode, targetQubit, sequence of control qubits]
+    ///
+    /// ## parameters
+    /// an array of circuit parameters
+    ///
+    /// ## testSet
+    /// the set of vectors to be labeled
+    ///
+    /// ## nMeasurements
+    /// number of measurement cycles to be used for estimating each probability
+    ///
+    /// # Output
+    /// Array of estimated probabilities of the top class label (one for each sample in the test set)
+    ///
+    operation EstimateClassificationProbabilities(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], testSet: Double[][], nMeasurements: Int) : Double[]
+    {
+        let segSched = [0..1..Length(testSet)-1];
+        return EstimateClassificationProbabilitiesClassicalData(tolerance, testSet, SamplingSchedule(segSched), nQubits, unFlattenGateSequence(gates), parameters, nMeasurements);
+    }
+
+    /// # Summary
+    /// Using a flat description of a classification model, assign a predicted class label
+    /// to each vector in the test set.
+    ///
+    /// # Input
+    /// ## nQubits
+    /// the number of qubits used for data encoding
+    ///
+    /// ## gates
+    /// Flattened representation of classifier structure.
Each element is + /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] + /// + /// ## parameters + /// an array of circuit parameters + /// + /// ## testSet + /// the set of vectors to be labeled + /// + /// ## bias + /// top class bias + /// + /// ## nMeasurenets + /// number of the measurement cycles to be used for estimation of each probability + /// + /// # Output + /// Array of predicted class labels for each sample of the test set + /// + operation DoClassification(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], bias: Double, testSet: Double[][], nMeasurements: Int) : Int[] + { + let probs = EstimateClassificationProbabilities(tolerance, nQubits,gates,parameters,testSet,nMeasurements); + return InferredLabels(probs, bias); + } + + +} diff --git a/MachineLearning/src/Runtime/Convert.qs b/MachineLearning/src/Runtime/Convert.qs new file mode 100644 index 00000000000..bcebf05d77e --- /dev/null +++ b/MachineLearning/src/Runtime/Convert.qs @@ -0,0 +1,63 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + + function unFlattenSchedule(sc : Int[][]) : SamplingSchedule + { + mutable ret = new Range[0]; + for (flattenedRange in sc) { + set ret += [flattenedRange[0]..flattenedRange[1]..flattenedRange[2]]; + } + return SamplingSchedule(ret); + } + + function unFlattenLabeledSamples(dat:Double[][], labs:Int[]) : LabeledSample[] { + mutable cnt = MinI(Length(dat), Length(labs)); + mutable ret = new LabeledSample[cnt]; + for (j in 0..(cnt - 1)) { + set ret w/= j <- LabeledSample(dat[j], labs[j]); + } + return ret; + } + + /// Debugging prop + operation unFlattenPauli(p:Int): Pauli + { + if (p==1) + { + return PauliX; + } + if (p==2) + { + return PauliY; + } + if (p==3) + { + return PauliZ; + } + return PauliI; + } + + /// Debugging prop + /// upcasting controlled rotation in flat representation (paramIx,pauliIx,gateSpan) + operation unFlattenControlledRotation(cod:Int[]): ControlledRotation { + return ControlledRotation( + GateSpan( + cod[2], cod[3...] 
+ ), + unFlattenPauli(cod[1]), + cod[0] + ); + } + + /// Debugging prop + operation unFlattenGateSequence(seq: Int[][]) : GateSequence { + mutable tmp = new ControlledRotation[Length(seq)]; + for (icr in 0..(Length(seq) - 1)) { + set tmp w/= icr <- unFlattenControlledRotation(seq[icr]); + } + return GateSequence(tmp); + } + +} \ No newline at end of file diff --git a/MachineLearning/src/Runtime/Deprecated.qs b/MachineLearning/src/Runtime/Deprecated.qs new file mode 100644 index 00000000000..a27d203ba34 --- /dev/null +++ b/MachineLearning/src/Runtime/Deprecated.qs @@ -0,0 +1,84 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Logical; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + + /// Sample container access method + @Deprecated("") + function getSample(samples: LabeledSampleContainer, ix: Int): LabeledSample { + return (samples!)[ix]; + } + + /// Access the raw data in a labeled sample + @Deprecated("") + function getData(samp: LabeledSample): Double[] { + return Fst(samp!); + } + + /// Access the label in a labeled sample + @Deprecated("") + function getLabel(samp:LabeledSample) : Int + { + return Snd(samp!); + } + + + /// Abstraction for a container of labeled samples + @Deprecated("") + newtype LabeledSampleContainer = LabeledSample[]; + + @Deprecated("Microsoft.Quantum.Diagnostics.DumpRegister") + function dumpRegisterToConsole ( qs: Qubit[]) : Unit + {} + //{DumpRegister((),qs);} //Swap for empty body when some dumping of registers is needed + + @Deprecated("Microsoft.Quantum.MachineLearning.NQubitsRequired") + function qubitSpan(seq : GateSequence) : Int { + return NQubitsRequired(seq); + } + + /// Set force a qubit into a desired basis state + @Deprecated("Microsoft.Quantum.Measurement.SetToBasisState") + operation Set (desired: Result, q1: Qubit) : Unit + { + //body + //{ + let current = M(q1); + if (desired != current) + { + X(q1); + } + //} + } + + @Deprecated("Microsoft.Quantum.Math.SquaredNorm") + function squareNorm(v:Double[]):Double + { + mutable ret = 0.0; + for (u in v) + { + set ret = ret + u*u; + } + return ret; + } + + @Deprecated("") // replace with ForEach. 
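+    // For reference, an equivalent ForEach-based form of the operation below is
+    //     ForEach(randomize(_, relativeFuzz), src)
+    // which is the pattern StochasticTrainingLoop in Training.qs already uses;
+    // this operation is retained only for backward compatibility.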
+ operation randomizeArray(src:Double[], relativeFuzz: Double) : Double[] + { + mutable ret = new Double[Length(src)]; + for (ix in 0..(Length(src)-1)) + { + set ret w/=ix<-randomize(src[ix], relativeFuzz); + } + return ret; + } + + @Deprecated("Microsoft.Quantum.Math.NearlyEqualD") + function nearIdenticalDoubles(x:Double,y:Double):Bool { + return NearlyEqualD(x, y); //Note key tolerance constant here + } + + +} diff --git a/MachineLearning/src/Runtime/Examples.qs b/MachineLearning/src/Runtime/Examples.qs new file mode 100644 index 00000000000..5c9d863f18d --- /dev/null +++ b/MachineLearning/src/Runtime/Examples.qs @@ -0,0 +1,96 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Primitive; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Math; + + operation IrisTrainingData() : LabeledSampleContainer { + let ret = + [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), + LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], + 1)), LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], + 0)), LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], + 0)), LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], + 0)), LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], + 0)), LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], + 1)), LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], + 0)), LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], + 1)), LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], + 1)), LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], + 0)), LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], + 0)), LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], + 0)), LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], + 0)), LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], + 0)), LabeledSample(([0.584546, 0.562276, 0.439516, 0.385976], + 1)), LabeledSample(([0.608485, 0.577022, 0.427781, 0.337336], + 1)), LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], + 1)), LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], + 1)), LabeledSample(([0.512997, 0.525043, 0.460839, 0.49879], + 0)), LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], + 0)), LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], + 0)), LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], + 1)), LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], + 1)), LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], + 0)), LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], + 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], + 0)), LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], + 1)), LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], + 0)), LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], + 0)), LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], + 0)), LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], + 1)), LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], + 1)), LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], + 1)), LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], + 1)), LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], + 1)), LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], + 1)), LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], + 0)), LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], + 0)), LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], + 0)), LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], + 0)), LabeledSample(([0.549086, 0.561405, 0.474075, 0.398223], + 1)), 
LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], + 0)), LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], + 0)), LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], + 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], + 0)), LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], + 1)), LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], + 0)), LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], + 0)), LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], + 0)), LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], + 0)), LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], + 0)), LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], + 1)), LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], + 1)), LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], + 1)), LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], + 1)), LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], + 1)), LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], + 1)), LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], + 1)), LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], + 1)), LabeledSample(([0.552402, 0.574207, 0.444699, 0.409123], + 1)), LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], + 1)), LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], + 0)), LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], + 1)), LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], + 1)), LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], + 0)), LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], + 0)), LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], + 0)), LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], + 1)), LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], + 1)), LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], + 1)), LabeledSample(([0.510624, 0.60155, 0.43847, 0.430285], + 1)), LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], + 1)), LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], + 1)), LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], + 0)), LabeledSample(([0.530036, 0.577194, 0.452731, 0.425375], + 1)), LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], + 1)), LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], + 1)), LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], + 0)), LabeledSample(([0.552072, 0.545641, 0.487013, 0.400388], 1)) + ]; + return LabeledSampleContainer(ret); + } + + operation Examples () : Unit + { + + } +} diff --git a/MachineLearning/src/Runtime/Features.qs b/MachineLearning/src/Runtime/Features.qs new file mode 100644 index 00000000000..94b882868d0 --- /dev/null +++ b/MachineLearning/src/Runtime/Features.qs @@ -0,0 +1,9 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Convert; + + function FeatureRegisterSize(sample : Double[]) : Int { + return Ceiling(Lg(IntAsDouble(Length(sample)))); + } + +} diff --git a/MachineLearning/src/Runtime/InputEncoding.qs b/MachineLearning/src/Runtime/InputEncoding.qs new file mode 100644 index 00000000000..d7f147e1902 --- /dev/null +++ b/MachineLearning/src/Runtime/InputEncoding.qs @@ -0,0 +1,112 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
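+
+// The encoders in this file load a classical feature vector into the amplitudes
+// of a qubit register: NoisyInputEncoder quantizes the coefficients to a given
+// tolerance and handles a small number of negative signs via reflections, while
+// InputEncoder prepares the requested state (almost) exactly.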
+ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + function _CanApplyTwoQubitCase(datum: Double[]) : Bool { + return((Length(datum)==4) and (Microsoft.Quantum.Math.AbsD(datum[0]*datum[3]-datum[1]*datum[2])< 1E-12) and (Microsoft.Quantum.Math.AbsD(datum[0])> 1E-4)); + } + + operation _ApplyTwoQubitCase(datum: Double[], reg: LittleEndian) : Unit is Adj + Ctl { + let x = datum[1]/datum[0]; + let y = datum[2]/datum[0]; + // we now encoding [1,x,y,x*y] + let ax = 2.0 * ArcTan(x); + let ay = 2.0 * ArcTan(y); + R(PauliY, ay, (reg!)[1]); + R(PauliY, ax, (reg!)[0]); + } + + function _Unnegate(negLocs: Int[], coefficients : ComplexPolar[]) : ComplexPolar[] { + mutable ret = coefficients; + for (idxNegative in negLocs) { + let jx = negLocs[idxNegative]; + let coefficient = coefficients[jx]; + set ret w/= jx <- ComplexPolar(coefficient::Magnitude, 0.0); + } + return ret; + } + + /// Do special processing on the first cNegative entries + operation _EncodeSparseNegativeInput(cNegative: Int, tolerance: Double,coefficients : ComplexPolar[], reg: LittleEndian): Unit is Adj + Ctl + { + let negLocs = collectNegativeLocs(cNegative, coefficients); + // Prepare the state disregarding the sign of negative components. + NoisyPrepareArbitraryState(tolerance, _Unnegate(negLocs, coefficients), reg); + // Reflect about the negative coefficients to apply the negative signs + // at the end. + for (ineg in 0..(cNegative - 1)) { + let jx = negLocs[ineg]; + if (jx > -1) { + ReflectAboutInteger(jx, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) + } + } + } + + function NoisyInputEncoder(tolerance: Double,coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { + //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 + let nCoefficients = Length(coefficients); + mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients]; + mutable cNegative = 0; + for (idx in 0 .. nCoefficients - 1) { + mutable coef = coefficients[idx]; + if (tolerance > 1E-9) { + set coef = tolerance * IntAsDouble(Round(coefficients[idx] / tolerance)); //quantization + } + mutable ang = 0.0; + if (coef < 0.0) { + set cNegative += 1; + set coef = -coef; + set ang = PI(); + } + set coefficientsComplexPolar w/= idx <- ComplexPolar(coef, ang); + } + + // Check if we can apply the explicit two-qubit case. + if (_CanApplyTwoQubitCase(coefficients)) { + return _ApplyTwoQubitCase(coefficients, _); + } + // If not, we may be able to use a special protocol in the case that + // there are only a few negative coefficients. + // Here, by a "few," we mean fewer than the number of qubits required + // to encode features. + if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) { + return _EncodeSparseNegativeInput(cNegative, tolerance, coefficientsComplexPolar, _); //TODO:MORE:ACCEPTANCE ("Wines" passing soi far) + } + + // Finally, we fall back to arbitrary state preparation. + return NoisyPrepareArbitraryState(tolerance, coefficientsComplexPolar, _); + } //EncodeNoisyInput + + //TODO:REVIEW: Design consideration! The implicit qubit count must be read off from the state encoder, NOT from the gate sequence! 
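+
+    /// # Summary
+    /// Illustrative sketch only (a hypothetical helper, not used elsewhere):
+    /// mirrors the quantization step of NoisyInputEncoder above, rounding a
+    /// coefficient x to the nearest integer multiple y * tolerance, so that
+    /// |x - y * tolerance| <= tolerance / 2 for the chosen integer y.
+    function _QuantizedCoefficient(tolerance : Double, coefficient : Double) : Double {
+        if (tolerance > 1E-9) {
+            return tolerance * IntAsDouble(Round(coefficient / tolerance));
+        }
+        return coefficient;
+    }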
+ + /// Create amplitude encoding of an array of real-valued coefficients + /// The vector of 'coefficients' does not have to be unitary + function InputEncoder(coefficients : Double[]): (LittleEndian => Unit is Adj + Ctl) { + //default implementation, does not respect sparcity + let nCoefficients = Length(coefficients); + mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients]; + mutable allPositive = true; + for (idx in 0 .. nCoefficients - 1) { + mutable coef = coefficients[idx]; + mutable ang = 0.0; + if (coef < 0.0) + { + set allPositive = false; + set coef = -coef; + set ang =Microsoft.Quantum.Math.PI(); + } + set coefficientsComplexPolar w/= idx<-ComplexPolar(coef,ang); + } + if (_CanApplyTwoQubitCase(coefficients)) { + return _ApplyTwoQubitCase(coefficients,_); + } + return NoisyPrepareArbitraryState(1E-12, coefficientsComplexPolar, _); //this is preparing the state almost exactly so far + } + +} \ No newline at end of file diff --git a/MachineLearning/src/Runtime/Properties/NamespaceInfo.qs b/MachineLearning/src/Runtime/Properties/NamespaceInfo.qs new file mode 100644 index 00000000000..3f9fc15eb88 --- /dev/null +++ b/MachineLearning/src/Runtime/Properties/NamespaceInfo.qs @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/// # Summary +/// TODO +/// +/// # References +/// - Maria Schuld, Alex Bocharov, Krysta Svore, Nathan Wiebe, +/// "Circuit-centric quantum classifiers", +/// https://arxiv.org/abs/1804.00633. +namespace Microsoft.Quantum.MachineLearning { } diff --git a/MachineLearning/src/Runtime/RotationSequences.qs b/MachineLearning/src/Runtime/RotationSequences.qs new file mode 100644 index 00000000000..1687d8c99dc --- /dev/null +++ b/MachineLearning/src/Runtime/RotationSequences.qs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + /// What is the minimum number of qubits + /// to support the subject gate sequence? + /// Find the maximum qubit index m occuring + /// in a gate sequence and return m+1 + function NQubitsRequired(seq : GateSequence) : Int { + mutable nQubitsRequired = 0; + for (gate in seq!) { + set nQubitsRequired = Fold( + MaxI, 0, + gate::Span::ControlIndices + [ + gate::Span::TargetIndex, + nQubitsRequired + ] + ); + } + return nQubitsRequired; + } + + /// Apply parameterized gate sequence to subject qubit register + /// + operation _ApplyGates(parameters : Double[], gates: GateSequence, qubits : Qubit[]) : (Unit) is Adj + Ctl { + //dumpRegisterToConsole(qubits); + for (gate in gates!) { + // let (gsp,p,ix) = gt!; + if (gate::Index < Length(parameters)) { + let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); + if (IsEmpty(gate::Span::ControlIndices)) { + // Uncontrolled rotation of target + R(input); + } else { + //TODO: should one validate the control indices first? 
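+                    // Controlled rotation: rotate the target qubit about gate::Axis by
+                    // parameters[gate::Index], conditioned on the control qubits that
+                    // Subarray extracts via gate::Span::ControlIndices.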
+ (Controlled R)(Subarray(gate::Span::ControlIndices, qubits), input); + } + } + } + } + + operation ApplyGates(parameters : Double[], gates: GateSequence): (Qubit[] => Unit is Adj + Ctl) { + return _ApplyGates(parameters,gates,_); + } + +} diff --git a/MachineLearning/src/Runtime/Runtime.csproj b/MachineLearning/src/Runtime/Runtime.csproj new file mode 100644 index 00000000000..51305f555db --- /dev/null +++ b/MachineLearning/src/Runtime/Runtime.csproj @@ -0,0 +1,15 @@ + + + netstandard2.1 + x64 + + + + + + + + + + + diff --git a/MachineLearning/src/Runtime/SpecialMultiplexor.qs b/MachineLearning/src/Runtime/SpecialMultiplexor.qs new file mode 100644 index 00000000000..a95ea0f58ec --- /dev/null +++ b/MachineLearning/src/Runtime/SpecialMultiplexor.qs @@ -0,0 +1,252 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Canon { + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Math; + + /// # Summary + /// Applies a Pauli rotation conditioned on an array of qubits. + /// + /// This applies the multiply-controlled unitary operation $U$ that performs + /// rotations by angle $\theta_j$ about single-qubit Pauli operator $P$ + /// when controlled by the $n$-qubit number state $\ket{j}$. + /// + /// $U = \sum^{2^n-1}_{j=0}\ket{j}\bra{j}\otimes e^{i P \theta_j}$. + /// + /// # Input + /// ## tolerance + /// Coefficients under this tolerance level should be ignored + /// ## coefficients + /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## pauli + /// Pauli operator $P$ that determines axis of rotation. + /// + /// ## control + /// $n$-qubit control register that encodes number states $\ket{j}$ in + /// little-endian format. + /// + /// ## target + /// Single qubit register that is rotated by $e^{i P \theta_j}$. + /// + /// # Remarks + /// `coefficients` will be padded with elements $\theta_j = 0.0$ if + /// fewer than $2^n$ are specified. + operation NoisyMultiplexPauli (tolerance: Double,coefficients : Double[], pauli : Pauli, control : LittleEndian, target : Qubit) : Unit + { + body (...) + { + if (pauli == PauliZ) + { + let op = NoisyMultiplexZ(tolerance, coefficients, control, _); + op(target); + } + elif (pauli == PauliX) + { + let op = NoisyMultiplexPauli(tolerance,coefficients, PauliZ, control, _); + ApplyWithCA(H, op, target); + } + elif (pauli == PauliY) + { + let op = NoisyMultiplexPauli(tolerance,coefficients, PauliX, control, _); + ApplyWithCA(Adjoint S, op, target); + } + elif (pauli == PauliI) + { + NoisyApplyDiagonalUnitary(tolerance,coefficients, control); + } + else + { + fail $"MultiplexPauli failed. Invalid pauli {pauli}."; + } + } + + adjoint invert; + controlled distribute; + controlled adjoint distribute; + } + + + function significantReal(tol: Double, rg:Double[]):Bool + { + for(j in 0..(Length(rg)-1)) + { + if (AbsD(rg[j])>tol) + { + return true; + } + } + return false; + } + + /// # Summary + /// Applies a Pauli Z rotation conditioned on an array of qubits. + /// + /// This applies the multiply-controlled unitary operation $U$ that performs + /// rotations by angle $\theta_j$ about single-qubit Pauli operator $Z$ + /// when controlled by the $n$-qubit number state $\ket{j}$. + /// + /// $U = \sum^{2^n-1}_{j=0}\ket{j}\bra{j}\otimes e^{i Z \theta_j}$. 
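+    ///
+    /// The decomposition recurses on the most significant control qubit: the
+    /// coefficient array is split into half-sums and half-differences (see
+    /// specialMultiplexZComputeCoefficients_), and any branch whose coefficients
+    /// all lie below `tolerance` is skipped entirely.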
+ /// + /// # Input + /// ## coefficients + /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## control + /// $n$-qubit control register that encodes number states $\ket{j}$ in + /// little-endian format. + /// + /// ## target + /// Single qubit register that is rotated by $e^{i P \theta_j}$. + /// + /// # Remarks + /// `coefficients` will be padded with elements $\theta_j = 0.0$ if + /// fewer than $2^n$ are specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + operation NoisyMultiplexZ (tolerance: Double, coefficients : Double[], control : LittleEndian, target : Qubit) : Unit + { + body (...) + { + // pad coefficients length at tail to a power of 2. + let coefficientsPadded = Padded(-2 ^ Length(control!), 0.0, coefficients); + + if (Length(coefficientsPadded) == 1) + { + // Termination case + if (AbsD(coefficientsPadded[0])> tolerance) + { + Exp([PauliZ], coefficientsPadded[0], [target]); + } + } + else + { + // Compute new coefficients. + let (coefficients0, coefficients1) = specialMultiplexZComputeCoefficients_(coefficientsPadded); + NoisyMultiplexZ(tolerance,coefficients0, LittleEndian((control!)[0 .. Length(control!) - 2]), target); + if (significantReal(tolerance,coefficients1)) + { + CNOT((control!)[Length(control!) - 1], target); + NoisyMultiplexZ(tolerance,coefficients1, LittleEndian((control!)[0 .. Length(control!) - 2]), target); + CNOT((control!)[Length(control!) - 1], target); + } + } + } + + adjoint invert; + + controlled (controlRegister, ...) + { + // pad coefficients length to a power of 2. + let coefficientsPadded = Padded(2 ^ (Length(control!) + 1), 0.0, Padded(-2 ^ Length(control!), 0.0, coefficients)); + let (coefficients0, coefficients1) = specialMultiplexZComputeCoefficients_(coefficientsPadded); + NoisyMultiplexZ(tolerance,coefficients0, control, target); + if (significantReal(tolerance,coefficients1)) + { + Controlled X(controlRegister, target); + NoisyMultiplexZ(tolerance,coefficients1, control, target); + Controlled X(controlRegister, target); + } + } + + controlled adjoint invert; + } + + + /// # Summary + /// Applies an array of complex phases to numeric basis states of a register of qubits. + /// + /// That is, this implements the diagonal unitary operation $U$ that applies a complex phase + /// $e^{i \theta_j}$ on the $n$-qubit number state $\ket{j}$. + /// + /// $U = \sum^{2^n-1}_{j=0}e^{i\theta_j}\ket{j}\bra{j}$. + /// + /// TODO: REIMPLEMENT THIS along the Welch et Bocharov lines + /// # Input + /// ## tolerance + /// Coefficients under this tolerance level should be ignored + /// ## coefficients + /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## control + /// $n$-qubit control register that encodes number states $\ket{j}$ in + /// little-endian format. + /// + /// # Remarks + /// `coefficients` will be padded with elements $\theta_j = 0.0$ if + /// fewer than $2^n$ are specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + operation NoisyApplyDiagonalUnitary (tolerance: Double, coefficients : Double[], qubits : LittleEndian) : Unit + { + body (...) 
+ { + if (IsEmpty(qubits!)) { + fail "operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0."; + } + + // pad coefficients length at tail to a power of 2. + let coefficientsPadded = Padded(-2 ^ Length(qubits!), 0.0, coefficients); + + // Compute new coefficients. + let (coefficients0, coefficients1) = specialMultiplexZComputeCoefficients_(coefficientsPadded); + NoisyMultiplexZ(tolerance,coefficients1, LittleEndian((qubits!)[0 .. Length(qubits!) - 2]), (qubits!)[Length(qubits!) - 1]); + + if (Length(coefficientsPadded) == 2) + { + + // Termination case + if (AbsD(coefficients0[0])>tolerance) + { + Exp([PauliI], 1.0 * coefficients0[0], qubits!); + } + } + else + { + NoisyApplyDiagonalUnitary(tolerance,coefficients0, LittleEndian((qubits!)[0 .. Length(qubits!) - 2])); + } + } + + adjoint invert; + controlled distribute; + controlled adjoint distribute; + } + + + /// # Summary + /// Implementation step of multiply-controlled Z rotations. + /// # See Also + /// - Microsoft.Quantum.Canon.MultiplexZ + function specialMultiplexZComputeCoefficients_ (coefficients : Double[]) : (Double[], Double[]) + { + let newCoefficientsLength = Length(coefficients) / 2; + mutable coefficients0 = new Double[newCoefficientsLength]; + mutable coefficients1 = new Double[newCoefficientsLength]; + + for (idxCoeff in 0 .. newCoefficientsLength - 1) + { + set coefficients0 w/= idxCoeff <- 0.5 * (coefficients[idxCoeff] + coefficients[idxCoeff + newCoefficientsLength]); + set coefficients1 w/= idxCoeff <- 0.5 * (coefficients[idxCoeff] - coefficients[idxCoeff + newCoefficientsLength]); + } + + return (coefficients0, coefficients1); + } + + + + +} + + diff --git a/MachineLearning/src/Runtime/SpecialSP.qs b/MachineLearning/src/Runtime/SpecialSP.qs new file mode 100644 index 00000000000..620e108feff --- /dev/null +++ b/MachineLearning/src/Runtime/SpecialSP.qs @@ -0,0 +1,242 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + + // This library returns operations that prepare a specified quantum state + // from the computational basis state $\ket{0...0}$. + + /// # Summary + /// Returns an operation that prepares the given quantum state. + /// + /// The returned operation $U$ prepares an arbitrary quantum + /// state $\ket{\psi}$ with positive coefficients $\alpha_j\ge 0$ from + /// the $n$-qubit computational basis state $\ket{0...0}$. + /// + /// The action of U on a newly-allocated register is given by + /// $$ + /// \begin{align} + /// U \ket{0\cdots 0} = \ket{\psi} = \frac{\sum_{j=0}^{2^n-1}\alpha_j \ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|\alpha_j|^2}}. + /// \end{align} + /// $$ + /// + /// # Input + /// ## coefficients + /// Array of up to $2^n$ coefficients $\alpha_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// # Output + /// A state-preparation unitary operation $U$. + /// + /// # Remarks + /// Negative input coefficients $\alpha_j < 0$ will be treated as though + /// positive with value $|\alpha_j|$. `coefficients` will be padded with + /// elements $\alpha_j = 0.0$ if fewer than $2^n$ are specified. + /// + /// ## Example + /// The following snippet prepares the quantum state $\ket{\psi}=\sqrt{1/8}\ket{0}+\sqrt{7/8}\ket{2}$ + /// in the qubit register `qubitsLE`. 
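+    /// (With this noisy variant the tolerance is passed first, e.g.
+    /// `NoisyStatePreparationPositiveCoefficients(1E-12, amplitudes)`; rotation
+    /// angles falling below the tolerance are skipped during synthesis.)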
+ /// ```qsharp + /// let amplitudes = [Sqrt(0.125), 0.0, Sqrt(0.875), 0.0]; + /// let op = StatePreparationPositiveCoefficients(amplitudes); + /// using (qubits = Qubit[2]) { + /// let qubitsLE = LittleEndian(qubits); + /// op(qubitsLE); + /// } + /// ``` + function NoisyStatePreparationPositiveCoefficients (tolerance: Double, coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { + let nCoefficients = Length(coefficients); + mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients]; + for (idx in 0 .. nCoefficients - 1) { + set coefficientsComplexPolar w/= idx <- ComplexPolar(AbsD(coefficients[idx]), 0.0); + } + return NoisyPrepareArbitraryState(tolerance, coefficientsComplexPolar, _); + } + + /// # Summary + /// Returns an operation that prepares a specific quantum state. + /// + /// The returned operation $U$ prepares an arbitrary quantum + /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from + /// the $n$-qubit computational basis state $\ket{0...0}$. + /// + /// The action of U on a newly-allocated register is given by + /// $$ + /// \begin{align} + /// U\ket{0...0}=\ket{\psi}=\frac{\sum_{j=0}^{2^n-1}r_j e^{i t_j}\ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|r_j|^2}}. + /// \end{align} + /// $$ + /// + /// # Input + /// ## coefficients + /// Array of up to $2^n$ complex coefficients represented by their + /// absolute value and phase $(r_j, t_j)$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// # Output + /// A state-preparation unitary operation $U$. + /// + /// # Remarks + /// Negative input coefficients $r_j < 0$ will be treated as though + /// positive with value $|r_j|$. `coefficients` will be padded with + /// elements $(r_j, t_j) = (0.0, 0.0)$ if fewer than $2^n$ are + /// specified. + /// + /// ## Example + /// The following snippet prepares the quantum state $\ket{\psi}=e^{i 0.1}\sqrt{1/8}\ket{0}+\sqrt{7/8}\ket{2}$ + /// in the qubit register `qubitsLE`. + /// ```qsharp + /// let amplitudes = [Sqrt(0.125), 0.0, Sqrt(0.875), 0.0]; + /// let phases = [0.1, 0.0, 0.0, 0.0]; + /// mutable complexNumbers = new ComplexPolar[4]; + /// for (idx in 0..3) { + /// set complexNumbers[idx] = ComplexPolar(amplitudes[idx], phases[idx]); + /// } + /// let op = StatePreparationComplexCoefficients(complexNumbers); + /// using (qubits = Qubit[2]) { + /// let qubitsLE = LittleEndian(qubits); + /// op(qubitsLE); + /// } + /// ``` + function NoisyStatePreparationComplexCoefficients (tolerance: Double, coefficients : ComplexPolar[]) : (LittleEndian => Unit is Adj + Ctl) { + return NoisyPrepareArbitraryState(tolerance, coefficients, _); + } + + /// # Summary + /// Returns an operation that prepares a given quantum state. + /// + /// The returned operation $U$ prepares an arbitrary quantum + /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from + /// the $n$-qubit computational basis state $\ket{0...0}$. + /// + /// $$ + /// \begin{align} + /// U\ket{0...0}=\ket{\psi}=\frac{\sum_{j=0}^{2^n-1}r_j e^{i t_j}\ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|r_j|^2}}. + /// \end{align} + /// $$ + /// + /// # Input + /// ## coefficients + /// Array of up to $2^n$ complex coefficients represented by their + /// absolute value and phase $(r_j, t_j)$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## qubits + /// Qubit register encoding number states in little-endian format. 
This is + /// expected to be initialized in the computational basis state + /// $\ket{0...0}$. + /// + /// # Remarks + /// Negative input coefficients $r_j < 0$ will be treated as though + /// positive with value $|r_j|$. `coefficients` will be padded with + /// elements $(r_j, t_j) = (0.0, 0.0)$ if fewer than $2^n$ are + /// specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + operation NoisyPrepareArbitraryState (tolerance:Double, coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { + // pad coefficients at tail length to a power of 2. + let coefficientsPadded = Padded(-2 ^ Length(qubits!), ComplexPolar(0.0, 0.0), coefficients); + let target = (qubits!)[0]; + let op = (Adjoint _NoisyPrepareArbitraryState(tolerance,coefficientsPadded, _, _))(_, target); + op( + // Determine what controls to apply to `op`. + Length(qubits!) > 1 + ? LittleEndian((qubits!)[1 .. Length(qubits!) - 1]) + | LittleEndian(new Qubit[0]) + ); + } + + + function significantComplex(tol: Double, rg:ComplexPolar[]):Bool { + for (j in 0..(Length(rg)-1)) { + if (AbsComplexPolar(rg[j])>tol) { + return true; + } + } + return false; + } + + /// # Summary + /// Implementation step of arbitrary state preparation procedure. + /// + /// # See Also + /// - PrepareArbitraryState + /// - Microsoft.Quantum.Canon.MultiplexPauli + operation _NoisyPrepareArbitraryState(tolerance: Double, coefficients : ComplexPolar[], control : LittleEndian, target : Qubit) : Unit is Adj + Ctl { + // For each 2D block, compute disentangling single-qubit rotation parameters + let (disentanglingY, disentanglingZ, newCoefficients) = _NoisyStatePreparationSBMComputeCoefficients(coefficients); + if (significantReal(tolerance,disentanglingZ)) { + NoisyMultiplexPauli(tolerance,disentanglingZ, PauliZ, control, target); + } + if (significantReal(tolerance,disentanglingY)) { + NoisyMultiplexPauli(tolerance,disentanglingY, PauliY, control, target); + } + // target is now in |0> state up to the phase given by arg of newCoefficients. + + // Continue recursion while there are control qubits. + if (Length(control!) == 0) { + let (abs, arg) = newCoefficients[0]!; + if (AbsD(arg)> tolerance) + { + Exp([PauliI], -1.0 * arg, [target]); + } + } else { + if (significantComplex(tolerance,newCoefficients)) { + let newControl = LittleEndian((control!)[1 .. Length(control!) - 1]); + let newTarget = (control!)[0]; + _NoisyPrepareArbitraryState(tolerance,newCoefficients, newControl, newTarget); + } + } + } + + /// # Summary + /// Computes the Bloch sphere coordinates for a single-qubit state. + /// + /// Given two complex numbers $a0, a1$ that represent the qubit state, computes coordinates + /// on the Bloch sphere such that + /// $a0 \ket{0} + a1 \ket{1} = r e^{it}(e^{-i \phi /2}\cos{(\theta/2)}\ket{0}+e^{i \phi /2}\sin{(\theta/2)}\ket{1})$. + /// + /// # Input + /// ## a0 + /// Complex coefficient of state $\ket{0}$. + /// ## a1 + /// Complex coefficient of state $\ket{1}$. + /// + /// # Output + /// A tuple containing `(ComplexPolar(r, t), phi, theta)`. 
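+    ///
+    /// # Remarks
+    /// For example, `a0 = ComplexPolar(0.7071, 0.0)` and `a1 = ComplexPolar(0.7071, PI() / 2.0)`,
+    /// that is the state $(\ket{0} + i\ket{1})/\sqrt{2}$, yield approximately
+    /// `(ComplexPolar(1.0, PI() / 4.0), PI() / 2.0, PI() / 2.0)`.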
+ function NoisyBlochSphereCoordinates (a0 : ComplexPolar, a1 : ComplexPolar) : (ComplexPolar, Double, Double) { + let abs0 = AbsComplexPolar(a0); + let abs1 = AbsComplexPolar(a1); + let arg0 = ArgComplexPolar(a0); + let arg1 = ArgComplexPolar(a1); + let r = Sqrt(abs0 * abs0 + abs1 * abs1); + let t = 0.5 * (arg0 + arg1); + let phi = arg1 - arg0; + let theta = 2.0 * ArcTan2(abs1, abs0); + return (ComplexPolar(r, t), phi, theta); + } + + /// # Summary + /// Implementation step of arbitrary state preparation procedure. + /// # See Also + /// - Microsoft.Quantum.Canon.PrepareArbitraryState + function _NoisyStatePreparationSBMComputeCoefficients (coefficients : ComplexPolar[]) : (Double[], Double[], ComplexPolar[]) { + mutable disentanglingZ = new Double[Length(coefficients) / 2]; + mutable disentanglingY = new Double[Length(coefficients) / 2]; + mutable newCoefficients = new ComplexPolar[Length(coefficients) / 2]; + for (idxCoeff in 0 .. 2 .. Length(coefficients) - 1) { + let (rt, phi, theta) = NoisyBlochSphereCoordinates(coefficients[idxCoeff], coefficients[idxCoeff + 1]); + set disentanglingZ w/= idxCoeff / 2 <- 0.5 * phi; + set disentanglingY w/= idxCoeff / 2 <- 0.5 * theta; + set newCoefficients w/= idxCoeff / 2 <- rt; + } + return (disentanglingY, disentanglingZ, newCoefficients); + } +} \ No newline at end of file diff --git a/MachineLearning/src/Runtime/Training.qs b/MachineLearning/src/Runtime/Training.qs new file mode 100644 index 00000000000..be3c4cc5abe --- /dev/null +++ b/MachineLearning/src/Runtime/Training.qs @@ -0,0 +1,413 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Logical; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + operation TrainSequentialClassifier( + nQubits: Int, + gates: GateSequence, + parameterSource: Double[][], + samples: LabeledSample[], + trainingSchedule: SamplingSchedule, + validationSchedule: SamplingSchedule, + learningRate: Double, + tolerance: Double, + miniBatchSize: Int, + maxEpochs: Int, + nMeasurements: Int + ) : (Double[], Double) { + mutable retParam = [-1E12]; + mutable retBias = -2.0; //Indicates non-informative start + mutable bestValidation = Length(samples) + 1; + + let features = Mapped(_Features, samples); + let labels = Mapped(_Label, samples); + + let cTechnicalIter = 10; //10 iterations are sufficient for bias adjustment in most cases + for (idxStart in 0..(Length(parameterSource)-1)) { + Message($"Beginning training at start point #{idxStart}..."); + let ((h, m), (b, parpar)) = StochasticTrainingLoop( + samples, trainingSchedule, trainingSchedule, 1, miniBatchSize, + parameterSource[idxStart], gates, 0.0, learningRate, maxEpochs, + tolerance, nMeasurements + ); + let probsValidation = EstimateClassificationProbabilitiesClassicalData( + tolerance, features, validationSchedule, nQubits, + gates, parpar, nMeasurements + ); + //Estimate bias here! 
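+            // The recomputed bias (intended to best separate the two classes on the
+            // validation schedule, using at most cTechnicalIter refinements) is applied
+            // to the validation probabilities below; the start point whose inferred
+            // labels yield the fewest validation mismatches is kept as the result.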
+ let localBias = recomputeBias( + probsValidation, + labels, + validationSchedule, + 0.0, + tolerance, + cTechnicalIter + ); + let localPL = InferredLabels(probsValidation,localBias); + let localMisses = NMismatches(localPL, labels, validationSchedule); + if (bestValidation > localMisses) { + set bestValidation = localMisses; + set retParam = parpar; + set retBias = localBias; + } + + } + return (retParam, retBias); + } + + /// # Summary + /// Using a flat description of a classification model, find a good local optimum + /// for the model parameters and a related calssification bias + /// + /// # Input + /// ## nQubits + /// the number of qubits used for data encoding + /// + /// ## gates + /// flat characterization of circuit structure. Each element is [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] + /// + /// ## parameterSource + /// an array of parameter arrays, to be used as SGD starting points + /// + /// ## trainingSet + /// the set of training samples + /// + /// ## trainingLabels + /// the set of training labels + /// + /// ## trainingSchedule + /// defines a subset of training data actually used in the training process + /// + /// ## validatioSchedule + /// defines a subset of training data used for validation and computation of the *bias* + /// + /// ## learningRate + /// initial learning rate for stochastic gradient descent + /// + /// ## tolerance + /// sufficient absolute precision of parameter updates + /// + /// ## learningRate + /// initial learning rate for stochastic gradient descent + /// + /// ## miniBatchSize + /// maximum size of SGD mini batches + /// + /// ## maxEpochs + /// limit to the number of training epochs + /// + /// ## nMeasurenets + /// number of the measurement cycles to be used for estimation of each probability + /// + /// # Output + /// (Array of optimal parameters, optimal validation *bias*) + /// + operation TrainQcccSequential(nQubits: Int, gates: Int[][], parameterSource: Double[][], trainingSet: Double[][], trainingLabels: Int[], trainingSchedule: Int[][], validationSchedule: Int[][], + learningRate: Double, tolerance: Double, miniBatchSize: Int, maxEpochs: Int, nMeasurements: Int) : (Double[],Double) { + let samples = unFlattenLabeledSamples(trainingSet,trainingLabels); + let sch = unFlattenSchedule(trainingSchedule); + let schValidate = unFlattenSchedule(validationSchedule); + let gateSequence = unFlattenGateSequence(gates); + + return TrainSequentialClassifier( + nQubits, gateSequence, parameterSource, samples, + sch, schValidate, learningRate, tolerance, miniBatchSize, + maxEpochs, nMeasurements + ); + } //TrainQcccSequential + + /// # Summary + /// attempts a single parameter update in the direction of mini batch gradient + /// + /// # Input + /// ## miniBatch + /// container of labeled samples in the mini batch + /// + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## lrate + /// the learning rate + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. 
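+    /// (by the convention used elsewhere in this library, a measurement count of 0
+    /// selects exact probability estimation on a simulator)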
+ /// + /// # Output + /// (utility, (new)parameters) pair + /// + operation OneStochasticTrainingStep( + tolerance: Double, miniBatch: LabeledSample[], param: Double[], gates: GateSequence, + lrate: Double, measCount: Int + ) : (Double, Double[]) { + mutable upParam = new Double[Length(param)]; + mutable batchGradient = ConstantArray(Length(param), 0.0); + + for (samp in miniBatch) { + mutable err = IntAsDouble(samp::Label); + if (err < 1.0) { + set err = -1.0; //class 0 misclassified to class 1; strive to reduce the probability + } + let grad = EstimateGradientFromClassicalSample(tolerance, param, gates, samp::Features, measCount); + for (ip in 0..(Length(param) - 1)) { + // GradientClassicalSample actually computes antigradient, but err*grad corrects it back to gradient + set batchGradient w/= ip <- (batchGradient[ip] + lrate * err * grad[ip]); + } + + } + for (ip in 0..(Length(param)-1)) { + set upParam w/= ip <- (param[ip] + batchGradient[ip]); + } + return (SquaredNorm(batchGradient), upParam); //TODO:REVIEW: Ok to interpret utility as size of the overall move? + } + + + /// # Summary + /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator + /// + /// # Input + /// ## samples + /// a container of available data samples + /// + /// ## sched + /// a schedule of the data subset for this training loop + /// + /// ## schedScore + /// defines a (possibly different) data subset on which accuracy scoring is performed + /// + /// ## periodScore + /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) + /// + /// ## miniBatchSize + /// number of samples in a gradient mini batch + /// + /// ## param + /// initial parameter vector + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// ## bias + /// reserved for future use; originally - initial prediction bias + /// + /// ## lrate + /// learning rate + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. 
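+    ///
+    /// ## h0
+    /// incumbent number of hits, taken as the baseline to beat
+    ///
+    /// ## m0
+    /// incumbent number of misses, taken as the baseline to beat
+    ///
+    /// # Output
+    /// ((best hits, best misses), (best bias, best parameters)) observed during this epoch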
+ /// + operation OneStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, + miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int, + h0: Int, m0: Int): ((Int,Int),(Double,Double[])) + { + let HARDCODEDmaxIter = 10; + let HARDCODEDunderage = 3; //4/26 slack greater than 3 is not recommended + + + mutable hBest = h0; + mutable mBest = m0; + mutable biasBest = bias; + + let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); + let (h2,m2) = TallyHitsMisses(pls,biasBest); + let missLocations = MissLocations(schedScore, pls, biasBest); + + mutable paramBest = param; + mutable paramCurrent = paramBest; + mutable biasCurrent = biasBest; + + //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters + for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { + let miniBatch = ExtractMiniBatch(miniBatchSize, ixLoc, missLocations, samples); + let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); + if (Microsoft.Quantum.Math.AbsD(utility) > 0.0000001) { + //There had been some parameter update + if (utility > 0.0) { //good parameter update + set paramCurrent = upParam; + let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); + set biasCurrent = adjustBias(plsCurrent, bias, tolerance, HARDCODEDmaxIter); + let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); + if (m1 < mBest + HARDCODEDunderage) { + //we allow limited non-greediness + if (m1 < mBest) { + set hBest = h1; + set mBest = m1; + set paramBest = paramCurrent; + set biasBest = biasCurrent; + } + } else { + //otherwise we scrap the parameter update + set paramCurrent = paramBest; + set biasCurrent = biasBest; + } + } + + } + + } + return ((hBest, mBest), (biasBest, paramBest)); + } + + //Make some oblivious gradien descent steps without checking the prediction quality + operation OneUncontrolledStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, + miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int): ((Int,Int),(Double,Double[])) + { + let HARDCODEDmaxIter = 10; //TODO:MUST: tolerance and maxIter cannot stay hardcoded + let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); + mutable biasBest = adjustBias(pls, bias, tolerance, HARDCODEDmaxIter); + let (h0,m0) = TallyHitsMisses(pls,biasBest); // ClassificationScoreSimulated(samples, schedScore, param, gates, bias); //Deprecated + mutable hCur = h0; + mutable mCur = m0; + let missLocations = MissLocations(schedScore, pls, biasBest); + + mutable paramBest = param; + mutable paramCurrent = paramBest; + mutable biasCurrent = biasBest; + + //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters + for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { + let miniBatch = ExtractMiniBatch(miniBatchSize,ixLoc,missLocations,samples); + let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); + if (AbsD(utility) > 0.0000001) { + //There had been some parameter update + if (utility > 0.0) { //good parameter update + set paramCurrent = upParam; + let plsCurrent = 
ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); + set biasCurrent = adjustBias(plsCurrent, bias, tolerance, HARDCODEDmaxIter); + let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); + set hCur = h1; + set mCur = m1; + } + + } + + } + return ((hCur, mCur),(biasCurrent,paramCurrent)); + } //OneUncontrolledStochasticTrainingEpoch + + /// # Summary + /// Run a full circuit training loop on a subset of data samples + /// + /// # Input + /// ## samples + /// a container of available data samples + /// + /// ## sched + /// a schedule of the data subset for this training loop + /// + /// ## schedScore + /// defines a (possibly different) data subset on which accuracy scoring is performed + /// + /// ## periodScore + /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) + /// + /// ## miniBatchSize + /// number of samples in a gradient mini batch + /// + /// ## param + /// initial parameter vector + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// ## bias + /// reserved for future use; originally - initial prediction bias + /// + /// ## lrate + /// learning rate + /// + /// ## maxEpochs + /// maximum number of epochs in this loop + /// + /// ## tol + /// tolerance: acceptable misprediction rate in training + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. + /// IMPORTANT: measCount==0 implies simulator deployment + /// + /// # Output + /// ((no.hits,no.misses),(opt.bias,opt.parameters)) + /// + operation StochasticTrainingLoop(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, + miniBatchSizeInital: Int, param: Double[], gates: GateSequence, bias: Double, lrateInitial: Double, maxEpochs: Int, tol: Double, measCount: Int): ((Int,Int),(Double,Double[])) + { + let HARDCODEDmaxIter = 10; + //const + let manyNoops = 4; + //const + let relFuzz = 0.01; + let HARDCODEDmaxNoops = 2*manyNoops; + mutable pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); + mutable biasBest = adjustBias(pls, bias, tol, HARDCODEDmaxIter); + let (h0,m0) = TallyHitsMisses(pls,biasBest); + mutable hBest = h0; + mutable mBest = m0; + mutable paramBest = param; + mutable paramCurrent = param; + mutable biasCurrent = biasBest; + + //reintroducing learning rate heuristics + mutable lrate = lrateInitial; + mutable batchSize = miniBatchSizeInital; + mutable noopCount = 0; + mutable upBias = biasCurrent; + mutable upParam = paramCurrent; + for (ep in 1..maxEpochs) { + let ((h1,m1),(upB,upP)) = OneStochasticTrainingEpoch(samples, sched, schedScore, periodScore, + batchSize, paramCurrent, gates, biasCurrent, lrate, tol, measCount, hBest, mBest); + set upBias = upB; + set upParam = upP; + if (m1 < mBest) + { + set hBest = h1; + set mBest = m1; + set paramBest = upParam; + set biasBest = upBias; + if (IntAsDouble (mBest)/IntAsDouble (mBest+hBest)< tol) //Terminate based on tolerance + { + return ((hBest,mBest),(biasBest,paramBest)); + } + set noopCount = 0; //Reset the counter of consequtive noops + set lrate = lrateInitial; + set batchSize = miniBatchSizeInital; + } + if (NearlyEqualD(biasCurrent,upBias) and _AllNearlyEqualD(paramCurrent,upParam)) + { + set noopCount = noopCount+1; + if (noopCount > manyNoops) + { + if (noopCount > HARDCODEDmaxNoops) + { + return ((hBest,mBest),(biasBest,paramBest)); //Too many non-steps. 
Continuation makes no sense + } + else + { + set upBias = randomize(upBias, relFuzz); + set upParam = ForEach(randomize(_, relFuzz), upParam); + } + } + set batchSize = noopCount; //batchSize + 1; //Try to fuzz things up with smaller batch count + //and heat up a bit + set lrate = 1.25*lrate; + } + else + { + set noopCount = 0; //Reset the counter of consequtive noops + set lrate = lrateInitial; + set batchSize = miniBatchSizeInital; + } + set paramCurrent = upParam; + set biasCurrent = upBias; + } + + return ((hBest,mBest),(biasBest,paramBest)); + } + +} diff --git a/MachineLearning/src/Runtime/Types.qs b/MachineLearning/src/Runtime/Types.qs new file mode 100644 index 00000000000..586f014cbe1 --- /dev/null +++ b/MachineLearning/src/Runtime/Types.qs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arithmetic; + + /// Qubit span of a multicontrolled single-qubit gate + newtype GateSpan = ( + TargetIndex: Int, + ControlIndices: Int[] + ); + + /// One-parameter controlled rotation gate triplet: + /// (control structure, rotation axis, index of the rotation parameter) + newtype ControlledRotation = ( + Span: GateSpan, + Axis: Pauli, + Index: Int + ); + + /// Abstraction for sequence of gates + newtype GateSequence = ControlledRotation[]; + + /// Abstraction for state preparation + /// Fst(StateGenerator) is the number of qubits + /// Snd(Stategenerator) is a circuit to prepare subject state + newtype StateGenerator = ( + NQubits: Int, + Apply: (LittleEndian => Unit is Adj + Ctl) + ); + + /// Convention: negative Snd(labledSample) signifies the last sample in a batch + newtype LabeledSample = ( + Features: Double[], + Label: Int + ); + + // Here, we define a couple private accessor functions for LabeledSample, + // in lieu of having lambda support. These should not be used in external + // code. + function _Features(sample : LabeledSample) : Double[] { return sample::Features; } + function _Label(sample : LabeledSample) : Int { return sample::Label; } + + /// Abstraction for a two-level range of indices + newtype SamplingSchedule = Range[]; + + newtype ValidationResults = ( + NMisclassifications: Int + ); + + + +} diff --git a/MachineLearning/src/Runtime/Utils.qs b/MachineLearning/src/Runtime/Utils.qs new file mode 100644 index 00000000000..6a97180404d --- /dev/null +++ b/MachineLearning/src/Runtime/Utils.qs @@ -0,0 +1,12 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Logical; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + + function _AllNearlyEqualD(v1: Double[], v2: Double[]):Bool { + return Length(v1) == Length(v2) and All(NearlyEqualD, Zip(v1, v2)); + } + +} diff --git a/MachineLearning/src/Runtime/Validation.qs b/MachineLearning/src/Runtime/Validation.qs new file mode 100644 index 00000000000..49b64e9c69d --- /dev/null +++ b/MachineLearning/src/Runtime/Validation.qs @@ -0,0 +1,76 @@ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + function NMismatches(proposed: Int[], actual: Int[], validationSchedule: SamplingSchedule): Int { + mutable count = 0; + mutable ir = 0; + for (rg in validationSchedule!) 
{ + for (ix in rg) { + if (proposed[ir] != actual[ix]) { + set count += 1; + } + set ir += 1; + } + } + return count; + } + + /// # Summary + /// Using a flat description of a trained classification model, count + /// the number of mispredictions occuring over the validation set + /// + /// # Input + /// ## nQubits + /// the number of qubits used for data encoding + /// + /// ## trainingSet + /// the set of training samples + /// + /// ## trainingLabels + /// the set of training labels + /// + /// ## validatioSchedule + /// defines a subset of training data used for validation and computation of the *bias* + /// + /// ## gates + /// Flat representation of classifier structure. Each element is + /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] + /// + /// ## parameters + /// an array of candidate parameters + /// + /// ## bias + /// candidate predition bias + /// + /// ## nMeasurenets + /// number of the measurement cycles to be used for estimation of each probability + /// + /// # Output + /// the number of misclassifications + /// + operation CountValidationMisses(tolerance: Double, nQubits: Int, trainingSet: Double[][], trainingLabels: Int[], validationSchedule: Int[][], gates: Int[][], parameters: Double[],bias:Double, nMeasurements: Int) : Int + { + let schValidate = unFlattenSchedule(validationSchedule); + let results = ValidateModel( + tolerance, nQubits, Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), + schValidate, unFlattenGateSequence(gates), + parameters, bias, nMeasurements + ); + return results::NMisclassifications; + } + + operation ValidateModel(tolerance: Double, nQubits: Int, samples : LabeledSample[], validationSchedule: SamplingSchedule, gates: GateSequence, parameters: Double[], bias:Double, nMeasurements: Int) : ValidationResults + { + let features = Mapped(_Features, samples); + let labels = Mapped(_Label, samples); + let probsValidation = EstimateClassificationProbabilitiesClassicalData(tolerance, features, validationSchedule, nQubits, gates, parameters, nMeasurements); + let localPL = InferredLabels(probsValidation, bias); + let nMismatches = NMismatches(localPL, labels, validationSchedule); + return ValidationResults( + nMismatches + ); + } + +} diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj new file mode 100644 index 00000000000..d30ed6814b8 --- /dev/null +++ b/MachineLearning/tests/MachineLearningTests.csproj @@ -0,0 +1,26 @@ + + + netcoreapp3.0 + x64 + false + + + + + + + + + + + + + + + + + + + + + diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj index fc71e1db614..5df98732762 100644 --- a/Numerics/src/Numerics.csproj +++ b/Numerics/src/Numerics.csproj @@ -30,7 +30,7 @@ - + diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj index 73a0a72d9fe..3f1f62f3317 100644 --- a/Numerics/tests/NumericsTests.csproj +++ b/Numerics/tests/NumericsTests.csproj @@ -15,8 +15,8 @@ - - + + diff --git a/Standard/src/Arithmetic/Reflections.qs b/Standard/src/Arithmetic/Reflections.qs new file mode 100644 index 00000000000..e3240c9c464 --- /dev/null +++ b/Standard/src/Arithmetic/Reflections.qs @@ -0,0 +1,29 @@ +namespace Microsoft.Quantum.Arithmetic { + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + ///Flip the sign of just one amplitude + operation ReflectAboutInteger(index : Int, reg : LittleEndian): Unit is Adj + Ctl { + let nQubits = 
Length(reg!); + let bitstring = IntAsBoolArray(index, nQubits); + if (nQubits < 2) { + within { + if (not bitstring[0]) { + X(reg![0]); + } + } apply { + Z(reg![0]); + } + } else { + within { + ApplyToEachCA(CControlledCA(X), Zip(bitstring, reg!)); + } apply { + (Controlled Z)(Most(reg!), Tail(reg!)); //The true complexity of this operation is in O(nQubits) + } + } + } //_amplitudeSignFlip + + +} diff --git a/Standard/src/Arrays/Arrays.qs b/Standard/src/Arrays/Arrays.qs index 064375e5512..39ee97b74a3 100644 --- a/Standard/src/Arrays/Arrays.qs +++ b/Standard/src/Arrays/Arrays.qs @@ -280,6 +280,20 @@ namespace Microsoft.Quantum.Arrays { return output; } + /// # Summary + /// Returns true if and only if an array is empty. + /// + /// # Input + /// ## array + /// The array to be checked. + /// + /// # Output + /// `true` if and only if the array is empty (has length 0). + function IsEmpty<'T>(array : 'T[]) : Bool { + return Length(array) == 0; + } + + } diff --git a/Standard/src/Canon/Combinators/ApplyToElement.qs b/Standard/src/Canon/Combinators/ApplyToElement.qs new file mode 100644 index 00000000000..9aaadb70263 --- /dev/null +++ b/Standard/src/Canon/Combinators/ApplyToElement.qs @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Canon { + + /// # Summary + /// Applies an operation to a given element of an array. + /// + /// # Description + /// Given an operation `op`, an index `index`, and an array of targets `targets`, + /// applies `op(targets[index])`. + /// + /// # Input + /// ## op + /// An operation to be applied. + /// ## index + /// An index into the array of targets. + /// ## target + /// An array of possible targets for `op`. + /// + /// # Type Parameters + /// ## 'T + /// The input type of the operation to be applied. + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyToElementC + /// - Microsoft.Quantum.Canon.ApplyToElementA + /// - Microsoft.Quantum.Canon.ApplyToElementCA + operation ApplyToElement<'T> (op : ('T => Unit), index : Int, targets : 'T[]) : Unit { + op(targets[index]); + } + + /// # Summary + /// Applies an operation to a given element of an array. + /// + /// # Description + /// Given an operation `op`, an index `index`, and an array of targets `targets`, + /// applies `op(targets[index])`. + /// + /// # Input + /// ## op + /// An operation to be applied. + /// ## index + /// An index into the array of targets. + /// ## target + /// An array of possible targets for `op`. + /// + /// # Type Parameters + /// ## 'T + /// The input type of the operation to be applied. + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyToElement + /// - Microsoft.Quantum.Canon.ApplyToElementA + /// - Microsoft.Quantum.Canon.ApplyToElementCA + operation ApplyToElementC<'T> (op : ('T => Unit is Ctl), index : Int, targets : 'T[]) : Unit is Ctl { + op(targets[index]); + } + + /// # Summary + /// Applies an operation to a given element of an array. + /// + /// # Description + /// Given an operation `op`, an index `index`, and an array of targets `targets`, + /// applies `op(targets[index])`. + /// + /// # Input + /// ## op + /// An operation to be applied. + /// ## index + /// An index into the array of targets. + /// ## target + /// An array of possible targets for `op`. + /// + /// # Type Parameters + /// ## 'T + /// The input type of the operation to be applied. 
+ /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyToElement + /// - Microsoft.Quantum.Canon.ApplyToElementC + /// - Microsoft.Quantum.Canon.ApplyToElementCA + operation ApplyToElementA<'T> (op : ('T => Unit is Adj), index : Int, targets : 'T[]) : Unit is Adj { + op(targets[index]); + } + + /// # Summary + /// Applies an operation to a given element of an array. + /// + /// # Description + /// Given an operation `op`, an index `index`, and an array of targets `targets`, + /// applies `op(targets[index])`. + /// + /// # Input + /// ## op + /// An operation to be applied. + /// ## index + /// An index into the array of targets. + /// ## target + /// An array of possible targets for `op`. + /// + /// # Type Parameters + /// ## 'T + /// The input type of the operation to be applied. + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyToElement + /// - Microsoft.Quantum.Canon.ApplyToElementC + /// - Microsoft.Quantum.Canon.ApplyToElementA + operation ApplyToElementCA<'T> (op : ('T => Unit is Adj + Ctl), index : Int, targets : 'T[]) : Unit is Adj + Ctl { + op(targets[index]); + } + +} diff --git a/Standard/src/Logical/Predicates.qs b/Standard/src/Logical/Predicates.qs index fb5a8fff4db..4db663a13d3 100644 --- a/Standard/src/Logical/Predicates.qs +++ b/Standard/src/Logical/Predicates.qs @@ -70,6 +70,29 @@ namespace Microsoft.Quantum.Logical { return a == b; } + /// # Summary + /// Returns true if and only if two inputs are nearly equal (that is, within + /// a tolerance of 1e-12). + /// + /// # Input + /// ## a + /// The first value to be compared. + /// ## b + /// The second value to be compared. + /// + /// # Output + /// `true` if and only if `a` is nearly equal to `b`. + /// + /// # Remarks + /// The following are equivalent: + /// ```Q# + /// let cond = Microsoft.Quantum.Math.AbsD(a - b) < 1e-12; + /// let cond = NearlyEqualD(a, b); + /// ``` + function NearlyEqualD(a : Double, b : Double) : Bool { + return AbsD(a - b) < 1e-12; + } + /// # Summary /// Returns true if and only if two inputs are equal. /// @@ -211,6 +234,29 @@ namespace Microsoft.Quantum.Logical { return a != b; } + /// # Summary + /// Returns true if and only if two inputs are not nearly equal (that is, + /// are not within a tolerance of 1e-12). + /// + /// # Input + /// ## a + /// The first value to be compared. + /// ## b + /// The second value to be compared. + /// + /// # Output + /// `true` if and only if `a` is not nearly equal to `b`. + /// + /// # Remarks + /// The following are equivalent: + /// ```Q# + /// let cond = Microsoft.Quantum.Math.AbsD(a - b) >= 1e-12; + /// let cond = NotNearlyEqualD(a, b); + /// ``` + function NotNearlyEqualD(a : Double, b : Double) : Bool { + return not NearlyEqualD(a, b); + } + /// # Summary /// Returns true if and only if two inputs are not equal. /// diff --git a/Standard/src/Math/Functions.qs b/Standard/src/Math/Functions.qs index 8ee52d8a19a..0f2cf4d4cd4 100644 --- a/Standard/src/Math/Functions.qs +++ b/Standard/src/Math/Functions.qs @@ -610,6 +610,29 @@ namespace Microsoft.Quantum.Math { return PowD(norm, 1.0 / p); } + + /// # Summary + /// Returns the squared 2-norm of a vector. + /// + /// # Description + /// Returns the squared 2-norm of a vector; that is, given an input + /// $\vec{x}$, returns $\sum_i x_i^2$. + /// + /// # Input + /// ## array + /// The vector whose squared 2-norm is to be returned. + /// + /// # Output + /// The squared 2-norm of `array`. 
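+ /// # Remarks
+ /// ## Example
+ /// ```qsharp
+ /// // The following returns 25.0, since 3^2 + 4^2 = 25:
+ /// let squaredNorm = SquaredNorm([3.0, 4.0]);
+ /// ```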
+ function SquaredNorm(array : Double[]) : Double { + mutable ret = 0.0; + for (element in array) { + set ret += element * element; + } + return ret; + } + + /// # Summary /// Normalizes a vector of `Double`s in the `L(p)` norm. /// diff --git a/Standard/src/Measurement/Reset.qs b/Standard/src/Measurement/Reset.qs index 6f67371e58e..bdc45200efe 100644 --- a/Standard/src/Measurement/Reset.qs +++ b/Standard/src/Measurement/Reset.qs @@ -16,6 +16,26 @@ namespace Microsoft.Quantum.Measurement { controlled adjoint distribute; } + + /// # Summary + /// Sets a qubit to a given computational basis state by measuring the + /// qubit and applying a bit flip if needed. + /// + /// # Input + /// ## desired + /// The basis state that the qubit should be set to. + /// ## target + /// The qubit whose state is to be set. + /// + /// # Remarks + /// As an invariant of this operation, calling `M(q)` immediately + /// after `SetToBasisState(result, q)` will return `result`. + operation SetToBasisState(desired : Result, target : Qubit) : Unit { + if (desired != M(target)) { + X(target); + } + } + /// # Summary /// Measures a single qubit in the `Z` basis, /// and resets it to the standard basis state diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj index fbd6fbe0cbe..b8436bb2455 100644 --- a/Standard/src/Standard.csproj +++ b/Standard/src/Standard.csproj @@ -30,7 +30,7 @@ - + diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj index 7fb09117bae..771ff3c378a 100644 --- a/Standard/tests/Standard.Tests.csproj +++ b/Standard/tests/Standard.Tests.csproj @@ -20,8 +20,8 @@ - - + + From 6d23ce3b4710f5c104039eb433e2a6152a5ee320 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 20 Nov 2019 15:21:13 -0800 Subject: [PATCH 02/43] Unit tests for new operations and functions in M.Q.Standard used to support QML (#177) * Started writing unit tests for new standard library functionality. * Testing new array fn. * Fixes to Runtime project. * Added more tests. * Added tesets for rest of functionality added to M.Q.Std. 
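
As a rough sketch of what these tests cover (illustrative only; the operation
name below is made up for this example, and the actual assertions live in the
test files in the diff), the new Standard-library additions can be used
together as follows:

```qsharp
open Microsoft.Quantum.Arrays;      // IsEmpty
open Microsoft.Quantum.Intrinsic;   // Reset
open Microsoft.Quantum.Logical;     // NearlyEqualD
open Microsoft.Quantum.Math;        // SquaredNorm
open Microsoft.Quantum.Measurement; // SetToBasisState

operation UseNewStandardAdditions() : Unit {
    // IsEmpty is true only for zero-length arrays.
    let noElements = IsEmpty(new Int[0]);              // true
    // SquaredNorm sums the squares of the elements: 3^2 + 4^2 = 25.
    let squaredNorm = SquaredNorm([3.0, 4.0]);         // 25.0
    // NearlyEqualD compares two Doubles to within a tolerance of 1e-12.
    let nearlyEqual = NearlyEqualD(1.0, 1.0 + 1e-15);  // true
    using (q = Qubit()) {
        // Measures the qubit and flips it if needed, so that M(q) returns One here.
        SetToBasisState(One, q);
        Reset(q);
    }
}
```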
--- MachineLearning/src/Runtime/Runtime.csproj | 5 ++ Standard/src/Arithmetic/Reflections.qs | 52 ++++++++++++------- Standard/src/Math/Functions.qs | 14 ++--- Standard/src/Measurement/Reset.qs | 4 +- Standard/tests/AmplitudeAmplificationTests.qs | 1 + Standard/tests/Arithmetic/ReflectionTests.qs | 29 +++++++++++ Standard/tests/ArrayTests.qs | 8 +++ Standard/tests/CombinatorTests.qs | 27 ++++++++++ Standard/tests/Logical/PredicateTests.qs | 6 +++ Standard/tests/Math/MathTests.qs | 6 +++ Standard/tests/Measurement/ResetTests.qs | 24 +++++++++ 11 files changed, 147 insertions(+), 29 deletions(-) create mode 100644 Standard/tests/Arithmetic/ReflectionTests.qs create mode 100644 Standard/tests/Measurement/ResetTests.qs diff --git a/MachineLearning/src/Runtime/Runtime.csproj b/MachineLearning/src/Runtime/Runtime.csproj index 51305f555db..e9e8700740f 100644 --- a/MachineLearning/src/Runtime/Runtime.csproj +++ b/MachineLearning/src/Runtime/Runtime.csproj @@ -2,6 +2,11 @@ netstandard2.1 x64 + Microsoft.Quantum.MachineLearning.Runtime + + + + True diff --git a/Standard/src/Arithmetic/Reflections.qs b/Standard/src/Arithmetic/Reflections.qs index e3240c9c464..81e2af3afdc 100644 --- a/Standard/src/Arithmetic/Reflections.qs +++ b/Standard/src/Arithmetic/Reflections.qs @@ -3,27 +3,39 @@ namespace Microsoft.Quantum.Arithmetic { open Microsoft.Quantum.Convert; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Logical; - ///Flip the sign of just one amplitude - operation ReflectAboutInteger(index : Int, reg : LittleEndian): Unit is Adj + Ctl { - let nQubits = Length(reg!); - let bitstring = IntAsBoolArray(index, nQubits); - if (nQubits < 2) { - within { - if (not bitstring[0]) { - X(reg![0]); - } - } apply { - Z(reg![0]); - } - } else { - within { - ApplyToEachCA(CControlledCA(X), Zip(bitstring, reg!)); - } apply { - (Controlled Z)(Most(reg!), Tail(reg!)); //The true complexity of this operation is in O(nQubits) - } + /// # Summary + /// Reflects a quantum register about a given classical integer. + /// + /// # Description + /// Given a quantum register initially in the state $\sum_i \alpha_i \ket{i}$, + /// where each $\ket{i}$ is a basis state representing an integer $i$, + /// reflects the state of the register about the basis state for a given + /// integer $\ket{j}$, + /// $$ + /// \sum_i (-1)^{ \delta_{ij} } \alpha_i \ket{i} + /// $$ + /// + /// # Input + /// ## index + /// The classical integer indexing the basis state about which to reflect. + /// + /// # Remarks + /// This operation is implemented in-place, without explicit allocation of + /// additional auxillary qubits. + operation ReflectAboutInteger(index : Int, reg : LittleEndian) : Unit is Adj + Ctl { + within { + // We want to reduce to the problem of reflecting about the all-ones + // state. To do that, we apply our reflection within an application + // of X instructions that flip all the zeros in our index. + ApplyToEachCA( + CControlledCA(X), + Zip(Mapped(Not, IntAsBoolArray(index, Length(reg!))), reg!) + ); + } apply { + (Controlled Z)(Most(reg!), Tail(reg!)); } - } //_amplitudeSignFlip - + } } diff --git a/Standard/src/Math/Functions.qs b/Standard/src/Math/Functions.qs index 0f2cf4d4cd4..7fc4cd2300e 100644 --- a/Standard/src/Math/Functions.qs +++ b/Standard/src/Math/Functions.qs @@ -611,7 +611,7 @@ namespace Microsoft.Quantum.Math { } - /// # Summary + /// # Summary /// Returns the squared 2-norm of a vector. 
/// /// # Description @@ -625,12 +625,12 @@ namespace Microsoft.Quantum.Math { /// # Output /// The squared 2-norm of `array`. function SquaredNorm(array : Double[]) : Double { - mutable ret = 0.0; - for (element in array) { - set ret += element * element; - } - return ret; - } + mutable ret = 0.0; + for (element in array) { + set ret += element * element; + } + return ret; + } /// # Summary diff --git a/Standard/src/Measurement/Reset.qs b/Standard/src/Measurement/Reset.qs index bdc45200efe..f9687a7a678 100644 --- a/Standard/src/Measurement/Reset.qs +++ b/Standard/src/Measurement/Reset.qs @@ -17,7 +17,7 @@ namespace Microsoft.Quantum.Measurement { } - /// # Summary + /// # Summary /// Sets a qubit to a given computational basis state by measuring the /// qubit and applying a bit flip if needed. /// @@ -30,7 +30,7 @@ namespace Microsoft.Quantum.Measurement { /// # Remarks /// As an invariant of this operation, calling `M(q)` immediately /// after `SetToBasisState(result, q)` will return `result`. - operation SetToBasisState(desired : Result, target : Qubit) : Unit { + operation SetToBasisState(desired : Result, target : Qubit) : Unit { if (desired != M(target)) { X(target); } diff --git a/Standard/tests/AmplitudeAmplificationTests.qs b/Standard/tests/AmplitudeAmplificationTests.qs index fb98ac602ba..8f0ce1952e2 100644 --- a/Standard/tests/AmplitudeAmplificationTests.qs +++ b/Standard/tests/AmplitudeAmplificationTests.qs @@ -1,5 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. + namespace Microsoft.Quantum.Tests { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; diff --git a/Standard/tests/Arithmetic/ReflectionTests.qs b/Standard/tests/Arithmetic/ReflectionTests.qs new file mode 100644 index 00000000000..5002490cd97 --- /dev/null +++ b/Standard/tests/Arithmetic/ReflectionTests.qs @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Diagnostics; + + operation ManuallyReflectAboutFive(register : Qubit[]) : Unit is Adj + Ctl { + within { + X(register[1]); + } apply { + Controlled Z(register[0..1], register[2]); + } + } + + operation ReflectAboutFiveUsingLibrary(register : Qubit[]) : Unit is Adj + Ctl { + let littleEndian = LittleEndian(register); + ReflectAboutInteger(5, littleEndian); + } + + operation ReflectAboutIntegerTest() : Unit { + AssertOperationsEqualReferenced(3, + ReflectAboutFiveUsingLibrary, + ManuallyReflectAboutFive + ); + } + +} diff --git a/Standard/tests/ArrayTests.qs b/Standard/tests/ArrayTests.qs index 528c2f968ba..0cf9e169289 100644 --- a/Standard/tests/ArrayTests.qs +++ b/Standard/tests/ArrayTests.qs @@ -153,6 +153,14 @@ namespace Microsoft.Quantum.Tests { } } + function IsEmptyTest() : Unit { + Fact(IsEmpty(new Int[0]), "Empty array marked as non-empty."); + Fact(IsEmpty(new Qubit[0]), "Empty array marked as non-empty."); + Fact(IsEmpty(new (Double, (Int -> String))[0]), "Empty array marked as non-empty."); + Fact(not IsEmpty([PauliX, PauliZ]), "Non-empty array marked as empty."); + Fact(not IsEmpty([""]), "Non-empty array marked as empty."); + } + } diff --git a/Standard/tests/CombinatorTests.qs b/Standard/tests/CombinatorTests.qs index 5376e10e04b..9834abaec41 100644 --- a/Standard/tests/CombinatorTests.qs +++ b/Standard/tests/CombinatorTests.qs @@ -1,5 +1,6 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. + namespace Microsoft.Quantum.Tests { open Microsoft.Quantum.Math; open Microsoft.Quantum.Canon; @@ -349,4 +350,30 @@ namespace Microsoft.Quantum.Tests { AssertOperationsEqualReferenced(2, ApplyIfElseBCACase(false, _), ApplyToEachA(X, _)); } + operation ApplyXToSecondQubit(qubits : Qubit[]) : Unit is Adj + Ctl { + X(qubits[1]); + } + + operation ApplyToElementTest() : Unit { + AssertOperationsEqualReferenced(3, + ApplyToElement(X, 1, _), + ApplyXToSecondQubit + ); + + AssertOperationsEqualReferenced(3, + ApplyToElementC(X, 1, _), + ApplyXToSecondQubit + ); + + AssertOperationsEqualReferenced(3, + ApplyToElementA(X, 1, _), + ApplyXToSecondQubit + ); + + AssertOperationsEqualReferenced(3, + ApplyToElementCA(X, 1, _), + ApplyXToSecondQubit + ); + } + } diff --git a/Standard/tests/Logical/PredicateTests.qs b/Standard/tests/Logical/PredicateTests.qs index 75270552c88..5c970d97e46 100644 --- a/Standard/tests/Logical/PredicateTests.qs +++ b/Standard/tests/Logical/PredicateTests.qs @@ -96,4 +96,10 @@ namespace Microsoft.Quantum.Tests { Fact(not LessThanOrEqualL(32L, -13L), "LessThanOrEqualL returned wrong output."); } + function NearlyEqualDTest() : Unit { + Fact(NearlyEqualD(1.0, 1.0), "Exactly equal numbers marked as not nearly equal."); + Fact(NearlyEqualD(1.0, 1.0 + 1e-15), "Nearly equal numbers marked as not nearly equal."); + Fact(not NearlyEqualD(1.0, 1000.0), "Not nearly equal numbers marked as nearly equal."); + } + } diff --git a/Standard/tests/Math/MathTests.qs b/Standard/tests/Math/MathTests.qs index 0476d1cb770..3a8c27b1321 100644 --- a/Standard/tests/Math/MathTests.qs +++ b/Standard/tests/Math/MathTests.qs @@ -150,6 +150,12 @@ namespace Microsoft.Quantum.Canon { } } } + + function SquaredNormTest() : Unit { + NearEqualityFactD(SquaredNorm([2.0]), 4.0); + NearEqualityFactD(SquaredNorm([1.0, 1.0]), 2.0); + NearEqualityFactD(SquaredNorm([3.0, 4.0]), 25.0); + } } diff --git a/Standard/tests/Measurement/ResetTests.qs b/Standard/tests/Measurement/ResetTests.qs new file mode 100644 index 00000000000..45056d785e2 --- /dev/null +++ b/Standard/tests/Measurement/ResetTests.qs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
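+
+// Tests for the SetToBasisState operation added to the
+// Microsoft.Quantum.Measurement namespace in this change.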
+ +namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Measurement; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Diagnostics; + + operation CheckSetToBasisState(desired : Result) : Unit { + using (q = Qubit()) { + Ry(0.1234, q); + SetToBasisState(desired, q); + AssertQubit(desired, q); + Reset(q); + } + } + + operation SetToBasisStateTest() : Unit { + for (desired in [Zero, One]) { + CheckSetToBasisState(desired); + } + } + +} From 929478b921dce1e8b094d67b00d959099eb5ffa3 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Fri, 22 Nov 2019 13:35:59 -0800 Subject: [PATCH 03/43] Add repeated operation (#153) (#180) * Drafted new apply ops * Fixed docs * Fixed minor bugs * Added Permutation function + helper Arrays + Claim functions * Added Adj + Ctl and set csproj back to generating docs * Added tests * Fixed test errors * Updated code from comments; moved PermuteQubits to CommonGates * Fixed minor bugs * Apply suggestions from code review Co-Authored-By: Chris Granade * Added some fixes from changes * Added most recommendations from Mathias * Added example to ApplySeriesOfOps * Added new examples * Added PermuteQubits example * Changed Swap Order to be appending to an array, added test for it * Updated ApplyOpRepeatedlyOver Docs * Added Mathias' comments * Reverted csproj file * Renamed TupleArrayAsNestedArray --- Standard/src/Arrays/Arrays.qs | 107 ++++++++ .../Canon/Combinators/ApplyRepeatedOver.qs | 256 ++++++++++++++++++ Standard/src/Canon/CommonGates.qs | 28 ++ Standard/tests/ApplyRepeatedOverTests.qs | 67 +++++ Standard/tests/ArrayTests.qs | 107 +++++--- 5 files changed, 532 insertions(+), 33 deletions(-) create mode 100644 Standard/src/Canon/Combinators/ApplyRepeatedOver.qs create mode 100644 Standard/tests/ApplyRepeatedOverTests.qs diff --git a/Standard/src/Arrays/Arrays.qs b/Standard/src/Arrays/Arrays.qs index 39ee97b74a3..71bb8be81c7 100644 --- a/Standard/src/Arrays/Arrays.qs +++ b/Standard/src/Arrays/Arrays.qs @@ -2,8 +2,11 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Arrays { + open Microsoft.Quantum.Convert; open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Math; + open Microsoft.Quantum.Logical; /// # Summary /// Create an array that contains the same elements as an input array but in Reversed @@ -295,5 +298,109 @@ namespace Microsoft.Quantum.Arrays { } + function _IsPermutationPred(permutation : Int[], value : Int) : Bool { + let index = IndexOf(EqualI(value, _), permutation); + return index != -1; + } + + function _IsPermutation(permuation : Int[]) : Bool { + return All(_IsPermutationPred(permuation, _), RangeAsIntArray(IndexRange(permuation))); + } + + /// # Summary + /// Returns the order elements in an array need to be swapped to produce an ordered array. + /// Assumes swaps occur in place. + /// + /// # Input + /// ## newOrder + /// Array with the permutation of the indices of the new array. There should be $n$ elements, + /// each being a unique integer from $0$ to $n-1$. + /// + /// # Output + /// The tuple represents the two indices to be swapped. The swaps begin at the lowest index. 
+ /// + /// # Remarks + /// ## Example + /// ```qsharp + /// // The following returns [(0, 5),(0, 4),(0, 1),(0, 3)]; + /// let swapOrder = _SwapOrderToPermuteArray([5, 3, 2, 0, 1, 4]); + /// ``` + /// + /// ## Psuedocode + /// for (index in 0..Length(newOrder) - 1) + /// { + /// while newOrder[index] != index + /// { + /// Switch newOrder[index] with newOrder[newOrder[index]] + /// } + /// } + function _SwapOrderToPermuteArray(newOrder : Int[]) : (Int, Int)[] { + // Check to verify the new ordering actually is a permutation of the indices + Fact(_IsPermutation(newOrder), $"The new ordering is not a permutation"); + + mutable swaps = new (Int, Int)[0]; + mutable order = newOrder; + + // for each value, whenever the index and value don't match, swap until it does + for (index in IndexRange(order)) { + while (not EqualI(order[index], index)) + { + set swaps += [(index, order[index])]; + set order = Swapped(order[index], index, order); + } + } + + return swaps; + } + + /// # Summary + /// Applies a swap of two elements in an array. + /// + /// # Input + /// ## firstIndex + /// Index of the first element to be swapped. + /// + /// ## secondIndex + /// Index of the second element to be swapped. + /// + /// ## arr + /// Array with elements to be swapped. + /// + /// # Output + /// The array with the in place swapp applied. + /// + /// ## Example + /// ```qsharp + /// // The following returns [0, 3, 2, 1, 4] + /// Swapped(1, 3, [0, 1, 2, 3, 4]); + function Swapped<'T>(firstIndex: Int, secondIndex: Int, arr: 'T[]) : 'T[] { + return arr + w/ firstIndex <- arr[secondIndex] + w/ secondIndex <- arr[firstIndex]; + } + /// # Summary + /// Turns a list of 2-tuples into a nested array. + /// + /// # Input + /// ## tupleList + /// List of 2-tuples to be turned into a nested array. + /// + /// # Output + /// A nested array with length matching the tupleList. + /// + /// ## Example + /// ```qsharp + /// // The following returns [[2, 3], [4, 5]] + /// TupleArrayAsNestedArray([(2, 3), (4, 5)]); + /// ``` + function TupleArrayAsNestedArray<'T>(tupleList : ('T, 'T)[]) : 'T[][] { + mutable newArray = new 'T[][Length(tupleList)]; + for (idx in IndexRange(tupleList)) { + let (tupleLeft, tupleRight) = tupleList[idx]; + set newArray w/= idx <- [tupleLeft, tupleRight]; + } + return newArray; + } +} diff --git a/Standard/src/Canon/Combinators/ApplyRepeatedOver.qs b/Standard/src/Canon/Combinators/ApplyRepeatedOver.qs new file mode 100644 index 00000000000..7db8042d35e --- /dev/null +++ b/Standard/src/Canon/Combinators/ApplyRepeatedOver.qs @@ -0,0 +1,256 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Canon { + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + + /////////////////////////////////////////////////////////////////////////////////////////////// + // Helpers to repeatedly apply operations over qubit arrays + /////////////////////////////////////////////////////////////////////////////////////////////// + + /// # Summary + /// Applies a list of ops and their targets sequentially on an array. + /// + /// # Input + /// ## listOfOps + /// List of ops, each taking a 'T array, to be applied. They are applied sequentially, lowest index first. + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the indices to be used. + /// ## register + /// Qubit register to be acted upon. 
+ /// + /// ## Example + /// // The following applies Exp([PauliX, PauliY], 0.5) to qubits 0, 1 + /// // then X to qubit 2 + /// let ops = [Exp([PauliX, PauliY], 0.5, _), ApplyToFirstQubit(X, _)]; + /// let indices = [[0, 1], [2]]; + /// ApplySeriesOfOps(ops, indices, qubitArray); + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyOpRepeatedlyOver + operation ApplySeriesOfOps<'T>(listOfOps : ('T[] => Unit)[], targets : Int[][], register : 'T[]) : Unit { + if (Length(listOfOps) != Length(targets)) { + fail "The number of ops and number of targets do not match!"; + } + for ((op, targetIndices) in Zip(listOfOps, targets)) { + if (Length(targetIndices) > Length(register)) { + fail "There are too many targets!"; + } + op(Subarray(targetIndices, register)); + } + } + + /// # Summary + /// Applies a list of ops and their targets sequentially on an array. (Adjoint) + /// + /// # Input + /// ## listOfOps + /// List of ops, each taking a 'T array, to be applied. They are applied sequentially, lowest index first. + /// Each must have an adjoint functor + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the indices to be used. + /// ## register + /// Qubit register to be acted upon. + /// + /// ## Example + /// // The following applies Exp([PauliX, PauliY], 0.5) to qubits 0, 1 + /// // then X to qubit 2 + /// let ops = [Exp([PauliX, PauliY], 0.5, _), ApplyToFirstQubitA(X, _)]; + /// let indices = [[0, 1], [2]]; + /// ApplySeriesOfOpsA(ops, indices, qubitArray); + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyOpRepeatedlyOver + operation ApplySeriesOfOpsA<'T>(listOfOps : ('T[] => Unit is Adj)[], targets : Int[][], register : 'T[]) : Unit is Adj{ + if (Length(listOfOps) != Length(targets)) { + fail "The number of ops and number of targets do not match!"; + } + for ((op, targetIndices) in Zip(listOfOps, targets)) { + if (Length(targetIndices) > Length(register)) { + fail "There are too many targets!"; + } + op(Subarray(targetIndices, register)); + } + } + + /// # Summary + /// Applies a list of ops and their targets sequentially on an array. (Controlled) + /// + /// # Input + /// ## listOfOps + /// List of ops, each taking a 'T array, to be applied. They are applied sequentially, lowest index first. + /// Each must have a Controlled functor + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the indices to be used. + /// ## register + /// Qubit register to be acted upon. + /// + /// ## Example + /// // The following applies Exp([PauliX, PauliY], 0.5) to qubits 0, 1 + /// // then X to qubit 2 + /// let ops = [Exp([PauliX, PauliY], 0.5, _), ApplyToFirstQubitC(X, _)]; + /// let indices = [[0, 1], [2]]; + /// ApplySeriesOfOpsC(ops, indices, qubitArray); + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyOpRepeatedlyOver + operation ApplySeriesOfOpsC<'T>(listOfOps : ('T[] => Unit is Ctl)[], targets : Int[][], register : 'T[]) : Unit is Ctl{ + if (Length(listOfOps) != Length(targets)) { + fail "The number of ops and number of targets do not match!"; + } + for ((op, targetIndices) in Zip(listOfOps, targets)) { + if (Length(targetIndices) > Length(register)) { + fail "There are too many targets!"; + } + op(Subarray(targetIndices, register)); + } + } + + /// # Summary + /// Applies a list of ops and their targets sequentially on an array. 
(Adjoint + Controlled) + /// + /// # Input + /// ## listOfOps + /// List of ops, each taking a 'T array, to be applied. They are applied sequentially, lowest index first. + /// Each must have both an Adjoint and Controlled functor. + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the indices to be used. + /// ## register + /// Qubit register to be acted upon. + /// + /// ## Example + /// // The following applies Exp([PauliX, PauliY], 0.5) to qubits 0, 1 + /// // then X to qubit 2 + /// let ops = [Exp([PauliX, PauliY], 0.5, _), ApplyToFirstQubitCA(X, _)]; + /// let indices = [[0, 1], [2]]; + /// ApplySeriesOfOpsCA(ops, indices, qubitArray); + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplyOpRepeatedlyOver + operation ApplySeriesOfOpsCA<'T>(listOfOps : ('T[] => Unit is Adj + Ctl)[], targets : Int[][], register : 'T[]) : Unit is Adj + Ctl{ + if (Length(listOfOps) != Length(targets)) { + fail "The number of ops and number of targets do not match!"; + } + for ((op, targetIndices) in Zip(listOfOps, targets)) { + if (Length(targetIndices) > Length(register)) { + fail "There are too many targets!"; + } + op(Subarray(targetIndices, register)); + } + } + + /// # Summary + /// Applies the same op over a qubit register multiple times. + /// + /// # Input + /// ## op + /// An operation to be applied multiple times on the qubit register + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the qubits to be used. + /// ## register + /// Qubit register to be acted upon. + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplySeriesOfOps + operation ApplyOpRepeatedlyOver(op : (Qubit[] => Unit), targets : Int[][], register : Qubit[]) : Unit + { + for (target in targets) + { + if (Length(target) > Length(register)) + { + fail "Too many targets!"; + } + let opTargets = Subarray(target, register); + op(opTargets); + } + } + + /// # Summary + /// Applies the same op over a qubit register multiple times. + /// + /// # Input + /// ## op + /// An operation to be applied multiple times on the qubit register + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the qubits to be used. + /// ## register + /// Qubit register to be acted upon. + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplySeriesOfOps + operation ApplyOpRepeatedlyOverA(op : (Qubit[] => Unit is Adj), targets : Int[][], register : Qubit[]) : Unit is Adj + { + for (target in targets) + { + if (Length(target) > Length(register)) + { + fail "Too many targets!"; + } + let opTargets = Subarray(target, register); + op(opTargets); + } + } + + /// # Summary + /// Applies the same op over a qubit register multiple times. + /// + /// # Input + /// ## op + /// An operation to be applied multiple times on the qubit register + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the qubits to be used. + /// ## register + /// Qubit register to be acted upon. 
+ /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplySeriesOfOps + operation ApplyOpRepeatedlyOverC(op : (Qubit[] => Unit is Ctl), targets : Int[][], register : Qubit[]) : Unit is Ctl + { + for (target in targets) + { + if (Length(target) > Length(register)) + { + fail "Too many targets!"; + } + let opTargets = Subarray(target, register); + op(opTargets); + } + } + + /// # Summary + /// Applies the same op over a qubit register multiple times. + /// + /// # Input + /// ## op + /// An operation to be applied multiple times on the qubit register + /// ## targets + /// Nested arrays describing the targets of the op. Each array should contain a list of ints describing + /// the qubits to be used. + /// ## register + /// Qubit register to be acted upon. + /// + /// # See Also + /// - Microsoft.Quantum.Canon.ApplySeriesOfOps + operation ApplyOpRepeatedlyOverCA(op : (Qubit[] => Unit is Adj+Ctl), targets : Int[][], register : Qubit[]) : Unit is Adj+Ctl + { + for (target in targets) + { + if (Length(target) > Length(register)) + { + fail "Too many targets!"; + } + let opTargets = Subarray(target, register); + op(opTargets); + } + } + +} diff --git a/Standard/src/Canon/CommonGates.qs b/Standard/src/Canon/CommonGates.qs index e3f74f10a2c..fafc2dcd31f 100644 --- a/Standard/src/Canon/CommonGates.qs +++ b/Standard/src/Canon/CommonGates.qs @@ -4,6 +4,8 @@ namespace Microsoft.Quantum.Canon { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Arrays; /// # Summary /// Applies the controlled-X (CX) gate to a pair of qubits. @@ -272,4 +274,30 @@ namespace Microsoft.Quantum.Canon { CZ(qubit1, qubit2); } + /// # Summary + /// Permutes qubits by using the SWAP operation. + /// + /// # Input + /// ## ordering + /// Describes the new ordering of the qubits, where the qubit at index i will now be at ordering[i]. + /// ## register + /// Qubit register to be acted upon. + /// + /// # Example + /// Given ordering = [2, 1, 0] and register $\ket{\alpha_0} \ket{\alpha_1} \ket{\alpha_2}$, PermuteQubits + /// changes the register into $\ket{\alpha_2} \ket{\alpha_1} \ket{\alpha_0}$ + /// + /// ```qsharp + /// // The following two lines are equivalent + /// PermuteQubits([2, 1, 0], register); + /// SWAP(register[0], register[2]); + /// ``` + operation PermuteQubits(ordering : Int[], register : Qubit[]) : Unit is Adj+Ctl { + EqualityFactI(Length(ordering), Length(register), "The new ordering has an incorrect number of elements"); + + for ((left, right) in _SwapOrderToPermuteArray(ordering)) { + SWAP(register[left], register[right]); + } + } + } diff --git a/Standard/tests/ApplyRepeatedOverTests.qs b/Standard/tests/ApplyRepeatedOverTests.qs new file mode 100644 index 00000000000..31cdc1ca0fb --- /dev/null +++ b/Standard/tests/ApplyRepeatedOverTests.qs @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
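+
+// Tests for the ApplySeriesOfOps and ApplyOpRepeatedlyOver combinators and for
+// PermuteQubits, added to Microsoft.Quantum.Canon in this change.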
+ +namespace Microsoft.Quantum.Tests { + + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Arrays; + + + operation ApplySeriesOfOpsTest() : Unit { + // create the sample ops + their targets here + let op1 = ApplyToFirstQubit(X, _); + let op2 = ApplyToFirstTwoQubits(CNOT, _); + let op3 = Exp([PauliX, PauliZ, PauliY], 0.2002, _); + let op4 = ApplyToEachA(H, _); + let target1 = [0]; + let target2 = [0, 4]; + let target3 = [2, 3, 5]; + let target4 = [1, 2, 3, 4]; + + let listOfOps = [op1, op2, op3, op4]; + let listOfTargets = [target1, target2, target3, target4]; + AssertOperationsEqualReferenced(6, ApplySeriesOfOps(listOfOps, listOfTargets, _), _SampleApplySeriesOfOps(_)); + } + + // Helper method for ApplySeriesOfOpsTest + operation _SampleApplySeriesOfOps(register : Qubit[]) : Unit is Adj + Ctl { + // replicate those ops implemented here + X(register[0]); + CNOT(register[0], register[4]); + Exp([PauliX, PauliZ, PauliY], 0.2002, Subarray([2, 3, 5], register)); + ApplyToEachCA(H, Subarray([1, 2, 3, 4], register)); + } + + operation ApplyRepeatedOpTest() : Unit { + let op = ApplyToFirstThreeQubits(CCNOT, _); + let targets = [[0, 1, 2], [2, 1, 0], [3, 4, 5], [2, 4, 0], [5, 3, 1]]; + AssertOperationsEqualReferenced(6, ApplyOpRepeatedlyOver(op, targets, _), _SampleApplyRepeatedOp(_)); + } + + // Helper method for ApplyRepeatedOpTest + operation _SampleApplyRepeatedOp(register : Qubit[]) : Unit is Adj + Ctl { + CCNOT(register[0], register[1], register[2]); + CCNOT(register[2], register[1], register[0]); + CCNOT(register[3], register[4], register[5]); + CCNOT(register[2], register[4], register[0]); + CCNOT(register[5], register[3], register[1]); + } + + operation PermuteQubitsTest() : Unit { + let sampleOrder = [5, 3, 2, 0, 1, 4]; + AssertOperationsEqualReferenced(6, PermuteQubits(sampleOrder, _) , _SamplePermuteQubits); + } + + // Helper method for PermuteQubitsTest + operation _SamplePermuteQubits(register : Qubit[]) : Unit is Adj + Ctl { + // assumes the order to be swapped is [(0, 5),(0, 4),(0, 1),(0, 3)] + // (Order is [5, 3, 2, 0, 1, 4]) + SWAP(register[0], register[5]); + SWAP(register[0], register[4]); + SWAP(register[0], register[1]); + SWAP(register[0], register[3]); + } + +} diff --git a/Standard/tests/ArrayTests.qs b/Standard/tests/ArrayTests.qs index 0cf9e169289..256b42468dd 100644 --- a/Standard/tests/ArrayTests.qs +++ b/Standard/tests/ArrayTests.qs @@ -5,61 +5,61 @@ namespace Microsoft.Quantum.Tests { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Arrays; - - + + function ZipTest () : Unit { - + let left = [1, 2, 101]; let right = [PauliY, PauliI]; let zipped = Zip(left, right); let (leftActual1, rightActual1) = zipped[0]; - + if (leftActual1 != 1 or rightActual1 != PauliY) { fail $"Expected (1, PauliY), got ({leftActual1}, {rightActual1})."; } - + let (leftActual2, rightActual2) = zipped[1]; - + if (leftActual2 != 2 or rightActual2 != PauliI) { fail $"Expected (2, PauliI), got ({leftActual2}, {rightActual2})."; } } - - + + function LookupTest () : Unit { - + let array = [1, 12, 71, 103]; let fn = LookupFunction(array); EqualityFactI(fn(0), 1, $"fn(0) did not return array[0]"); - + // Make sure we can call in random order! 
EqualityFactI(fn(3), 103, $"fn(3) did not return array[3]"); EqualityFactI(fn(2), 71, $"fn(2) did not return array[2]"); EqualityFactI(fn(1), 12, $"fn(1) did not return array[1]"); } - - + + function ConstantArrayTestHelper (x : Int) : Int { - + return x * x; } - - + + function ConstantArrayTest () : Unit { - + let dblArray = ConstantArray(71, 2.17); EqualityFactI(Length(dblArray), 71, $"ConstantArray(Int, Double) had the wrong length."); let ignore = Mapped(NearEqualityFactD(_, 2.17), dblArray); - + // Stress test by making an array of Int -> Int. let fnArray = ConstantArray(7, ConstantArrayTestHelper); EqualityFactI(Length(fnArray), 7, $"ConstantArray(Int, Int -> Int) had the wrong length."); EqualityFactI(fnArray[3](7), 49, $"ConstantArray(Int, Int -> Int) had the wrong value."); } - - + + function SubarrayTest () : Unit { - + let array0 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let subarrayOdd = Subarray([1, 3, 5, 7, 9], array0); let subarrayEven = Subarray([0, 2, 4, 6, 8, 10], array0); @@ -68,34 +68,34 @@ namespace Microsoft.Quantum.Tests { let array1 = [10, 11, 12, 13]; Ignore(Mapped(EqualityFactI(_, _, $"Subarray failed: subpermutation case."), Zip([12, 11], Subarray([2, 1], array1)))); } - - + + function FilterTest () : Unit { - + let array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let evenArray = Filtered(IsEven, array); EqualityFactB(All(IsEven, evenArray), true, $"the even elements of [1..10] were not correctly filtered."); } - - + + function ReverseTest () : Unit { - + let array = [1, 2, 3]; Ignore(Mapped(EqualityFactI(_, _, $"Reversed failed."), Zip([3, 2, 1], Reversed(array)))); } - - + + function ExcludeTest () : Unit { - + let array = [10, 11, 12, 13, 14, 15]; Ignore(Mapped(EqualityFactI(_, _, $"Exclude failed."), Zip([10, 11, 13, 14], Exclude([2, 5], array)))); } - - + + function PadTest () : Unit { - + mutable arrayTestCase = [(-5, 2, [10, 11, 12], [10, 11, 12, 2, 2]), (5, 2, [10, 11, 12], [2, 2, 10, 11, 12]), (-3, -2, [10, 11, 12], [10, 11, 12])]; - + for (idxTest in IndexRange(arrayTestCase)) { let (nElementsTotal, defaultElement, inputArray, outputArray) = arrayTestCase[idxTest]; let paddedArray = Padded(nElementsTotal, defaultElement, inputArray); @@ -161,6 +161,47 @@ namespace Microsoft.Quantum.Tests { Fact(not IsEmpty([""]), "Non-empty array marked as empty."); } + function _SwapOrderToPermuteArrayTest() : Unit { + let newOrder = [0, 4, 2, 1, 3]; + let expected = [(1, 4), (1, 3)]; + let actual = _SwapOrderToPermuteArray(newOrder); + + EqualityFactI(Length(expected), Length(actual), "Number of swaps does not match"); + for ((exp, act) in Zip(expected, actual)) { + let (leftExp, rightExp) = exp; + let (leftAct, rightAct) = act; + + EqualityFactI(leftExp, leftAct, "SWAP doesn't match"); + EqualityFactI(rightExp, rightAct, "SWAP doesn't match"); + } + } + + function SwappedTest() : Unit { + let example = [2, 4, 6, 8, 10]; + let expected = [2, 8, 6, 4, 10]; + let leftIndex = 1; + let rightIndex = 3; + let newArray = Swapped(leftIndex, rightIndex, example); + + EqualityFactI(Length(expected), Length(newArray), "Swapped array is a different size than original"); + for ((exp, act) in Zip(expected, newArray)) { + EqualityFactI(exp, act, "Elements did not match"); + } + } + + function TupleArrayAsNestedArrayTest() : Unit { + let example = [(0, 1), (2, 3), (4, 5), (6, 7)]; + let expected = [[0, 1], [2, 3], [4, 5], [6, 7]]; + + let actual = TupleArrayAsNestedArray(example); + EqualityFactI(Length(expected), Length(actual), "Arrays are of different sizes"); + for ((exp, act) in 
Zip(expected, actual)) { + for ((elementExp, elementAct) in Zip(exp, act)) { + EqualityFactI(elementExp, elementAct, "Elements did not match"); + } + } + } + } From 22a3dd59e5c27cf319f5df3fe6552015635560eb Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 22 Nov 2019 15:25:15 -0800 Subject: [PATCH 04/43] Fix merge conflict. --- Standard/src/Arrays/Arrays.qs | 2 -- 1 file changed, 2 deletions(-) diff --git a/Standard/src/Arrays/Arrays.qs b/Standard/src/Arrays/Arrays.qs index 71bb8be81c7..13a111729e5 100644 --- a/Standard/src/Arrays/Arrays.qs +++ b/Standard/src/Arrays/Arrays.qs @@ -296,8 +296,6 @@ namespace Microsoft.Quantum.Arrays { return Length(array) == 0; } - -} function _IsPermutationPred(permutation : Int[], value : Int) : Bool { let index = IndexOf(EqualI(value, _), permutation); return index != -1; From 1423acaf84194a21bd21771614849ca3ec16e03d Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Tue, 26 Nov 2019 15:51:33 -0500 Subject: [PATCH 05/43] Simplify classification API. (#179) * Removed unused "adapter" operations. * Removed unused operation. * Consolidated two APIs for estimating classification probabilities. * Removed old version of CircuitResult operation. * Simplified classification operation slightly. * Eliminated unused operation. * Clarified InferredLabel, updated argument order to respect currying. * Moved bias update into training, fixed bug. * Renamed operations to estimate classification probabilities. * Simplify TallyHitsMisses. * Trivial commit. * Addressing @bettinaheim's feedback. --- MachineLearning/src/Runtime/Circuits.qs | 650 +----------------- MachineLearning/src/Runtime/Classification.qs | 127 +++- MachineLearning/src/Runtime/InputEncoding.qs | 8 +- MachineLearning/src/Runtime/Training.qs | 101 ++- MachineLearning/src/Runtime/Types.qs | 19 + MachineLearning/src/Runtime/Validation.qs | 2 +- 6 files changed, 244 insertions(+), 663 deletions(-) diff --git a/MachineLearning/src/Runtime/Circuits.qs b/MachineLearning/src/Runtime/Circuits.qs index 77661be4a8d..51bbd02a70e 100644 --- a/MachineLearning/src/Runtime/Circuits.qs +++ b/MachineLearning/src/Runtime/Circuits.qs @@ -77,146 +77,6 @@ namespace Microsoft.Quantum.MachineLearning { return 1.0-EstimateFrequencyA(endToEndHTcircuit(enc2,param1,gates1,param2,gates2),measureLastQubit(nQubits), nQubits, nMeasurements); } - operation QubitProbPhysical(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, nQubits: Int, nMeasurements : Int) - : Double { - return 1.0 - EstimateFrequencyA( - endToEndPreparation(enc,parameters,gates), - measureLastQubit(nQubits), - nQubits, - nMeasurements - ); - } - - operation CircuitResultClassical(tolerance: Double, parameters : Double[], gates: GateSequence, sample: Double[], nMeasurements: Int) : Double - { - let dL = IntAsDouble (Length(sample)); - let N = Microsoft.Quantum.Math.Ceiling(Lg(dL)); - let circEnc = NoisyInputEncoder(tolerance/IntAsDouble(Length(gates!)),sample); - let rslt = QubitProbPhysical(circEnc, parameters,gates, N, nMeasurements); - return rslt; - - } - - - /// # Summary - /// Classify one sample; the label part of the container is ignored - /// - /// # Input - /// ## measCount - /// the number of measurements used - /// - /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// postselection bias of the model - /// - /// # Output - /// post-selected class 
label - /// - operation ClassifyOneSimulated(tolerance: Double, sample: LabeledSample, parameters : Double[], gates: GateSequence, bias: Double, nMeasurements: Int): Int - { - let dL = IntAsDouble (Length(getData(sample))); - mutable N = Microsoft.Quantum.Math.Ceiling(Lg(dL)); - let qsp = qubitSpan(gates); - if (N < qsp) - { - set N = qsp; - } - let circEnc = NoisyInputEncoder(tolerance/IntAsDouble(Length(gates!)), getData(sample)); - return bias + QubitProbPhysical(circEnc, parameters, gates, N, nMeasurements) > 0.5 ? 1 | 0; - } - - - /// # Summary - /// Quantum-lawful estimation of postselection probability of |1> - /// - /// # Input - /// ## measCount - /// the number of measurements used - /// - /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// # Output - /// the probability estimate - /// - operation CircuitResult (measCount: Int, sg: StateGenerator, parameters : Double[], gates: GateSequence) : Double { - - mutable countOne = 0.0; - mutable qCount = qubitSpan(gates); - if (qCount < Fst(sg!)) - { - set qCount = Fst(sg!); - } - let measIdx = qCount - 1; - let circEnc = Snd(sg!); - for (ep in 1..measCount) - { - using (qubits = Qubit[qCount]) - { - //let circEnc = InputEncoder(coefficients); //usage insights - //let qubitsBE = LittleEndian(qubits); - circEnc(LittleEndian(qubits)); - _ApplyGates(parameters, gates, qubits); - //dumpRegisterToConsole(qubits); - - let rslt = M(qubits[measIdx]); - if (rslt == One) - { - set countOne = countOne + 1.0; - } - - for(i in 0..qCount-1) - { - Set(Zero, qubits[i]); - } - } - } - - // Return number of times we saw a |1> - return countOne/IntAsDouble (measCount); - - } - - /// # Summary - /// Classify one sample represented as a state generator - /// - /// # Input - /// ## measCount - /// the number of measurements used - /// - /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// postselection bias of the model - /// - /// # Output - /// post-selected class label - /// - operation ClassifyOne (measCount: Int, sg: StateGenerator, parameters : Double[], gates: GateSequence, bias: Double) : (Int) - { - return CircuitResult(measCount,sg,parameters,gates)+bias > 0.5 ? 1 | 0; - } /// # Summary @@ -256,7 +116,6 @@ namespace Microsoft.Quantum.MachineLearning { let pC = Length(param); mutable grad = ConstantArray(pC, 0.0); mutable paramShift = param + [0.0]; - // let sqNorm0 = CircuitResultHack(param, gates, register); let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); for (gate in gates!) { @@ -342,165 +201,34 @@ namespace Microsoft.Quantum.MachineLearning { /// ## gates /// the sequence of gates in the circuit /// - /// ## measCount + /// ## nMeasurements /// the maximum number of quantum measurements used in the probability estimation - /// IMPORTANT: measCount==0 implies deployment to simulator /// /// # Output - /// (no.hits, no.misses) pair - /// - operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, measCount: Int): - (Double,Int)[] - { - mutable ret = [(0.0,0)]; - mutable sC = 0; - for (rg in sched!) 
- { - for (ix in rg) - { - set sC += 1; - } - } - mutable N = qubitSpan(gates); - if (Length(samples)>0) - { - let dL =Microsoft.Quantum.Math.Ceiling(Lg(IntAsDouble (Length(getData(Head(samples)))))); - if (N < dL) - { - set N = dL; - } - } - set ret = new (Double,Int)[sC]; - mutable ir = 0; + /// TODO + operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, nMeasurements: Int): + (Double,Int)[] { + mutable N = IsEmpty(samples) + ? NQubitsRequired(gates) + | MaxI(NQubitsRequired(gates), FeatureRegisterSize(_Features(Head(samples)))); + mutable ret = new (Double, Int)[0]; for (rg in sched!) { for (ix in rg) { - let samp = samples[ix]; + let sample = samples[ix]; //agnostic w.r.t. simulator (may still be simulable) - let prob1 = CircuitResultClassical(1E-12,param, gates, getData(samp),measCount); - set ret w/= ir <- (prob1, getLabel(samp)); - set ir += 1; + let prob1 = EstimateClassificationProbabilityFromSample(1E-12, param, gates, sample::Features, nMeasurements); + set ret += [(prob1, sample::Label)]; } } return ret; } - - - /// # Summary - /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. THIS operation is IN DEPRECATION - /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## nQubits - /// number of cubits in the classification circuit - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## param - /// parameters of the circuits - /// - /// ## measCount - /// - /// # Output - /// array of corresponding estimated probabilities of the top class label - /// - operation EstimateClassificationProbabilitiesClassicalData( - tolerance : Double, samples : Double[][], sched : SamplingSchedule, - nQubits : Int, gates : GateSequence, param : Double[], - nMeasurements : Int - ) : Double[] { - let effectiveTolerance = tolerance / IntAsDouble(Length(gates!)); - mutable ret = new Double[0]; - for (rg in sched!) { - for (ix in rg) { - let samp = samples[ix]; - let circEnc = NoisyInputEncoder(effectiveTolerance, samp); - set ret += [QubitProbPhysical(circEnc, param, gates, nQubits, nMeasurements)]; - } - } - - return ret; - } //EstimateClassificationProbabilitiesClassicalData - - operation EstimateClassificationProbabilitiesClassicalDataAdapter(tolerance: Double, samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] { return EstimateClassificationProbabilitiesClassicalData(tolerance, samples, unFlattenSchedule(schedule), nQubits, unFlattenGateSequence(gates), param, measCount); } - operation PrepareUniformSuperpositionLE(reg : LittleEndian) : Unit is Adj + Ctl { - ApplyToEachCA(H, reg!); - } - - /// # Summary - /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. 
- /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## param - /// parameters of the circuits - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## measCount - /// the maximum number of quantum measurements used in the probability estimation - /// IMPORTANT: measCount==0 implies deployment to simulator - /// - /// # Output - /// List if triplets of the form (sample index, sample probaility, sample label) - /// - operation ClassificationTripletsClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, measCount: Int): - (Int, Double, Int)[] - { - mutable ret = [(-1,0.0,0)]; - mutable sC = 0; - for (rg in sched!) - { - for (ix in rg) - { - set sC = sC +1; - } - } - mutable N = qubitSpan(gates); - if (not IsEmpty(samples)) { - let dL =Microsoft.Quantum.Math.Ceiling(Lg(IntAsDouble (Length(getData(Head(samples)))))); - if (N < dL) - { - set N = dL; - } - } - set ret = new (Int,Double,Int)[sC]; - mutable ir = 0; - for (rg in sched!) - { - for (ix in rg) - { - let samp = samples[ix]; - let data = getData(samp); - let circEnc = InputEncoder(data); - let sg = StateGenerator((N,circEnc)); - let prob1 = CircuitResult(measCount, sg, param, gates); - set ret w/=ir<-(ix,prob1,getLabel(samp)); - set ir = ir+1; - } - } - - return ret; - } - /// # Summary /// tallies hits and misses off a list of probability estimates /// @@ -517,34 +245,14 @@ namespace Microsoft.Quantum.MachineLearning { function TallyHitsMisses(pls: (Double, Int)[], bias: Double) : (Int, Int) { mutable hits = 0; mutable misses = 0; - for (pl in pls) - { - if (Fst(pl)+bias>0.5) - { - if (Snd(pl)<1) - { - //Misclassification - set misses=misses+1; - } - else - { - set hits=hits+1; - } - } - else - { - if (Snd(pl)>0) - { - //Misclassification - set misses=misses+1; - } - else - { - set hits=hits+1; - } + for ((classificationProbability, label) in pls) { + if (label == InferredLabel(bias, classificationProbability)) { + set hits += 1; + } else { + set misses += 1; } } - return (hits,misses); + return (hits, misses); } /// # Summary @@ -625,323 +333,6 @@ namespace Microsoft.Quantum.MachineLearning { return m1; } - - /// # Summary - /// C#-friendly adapter to misclassification tally - /// - /// # Input - /// ## vectors - /// data vectors in flat encoding - /// - /// ## labels - /// array of corresponding class lables - /// - /// ## schedule - /// flat representation of index subset on which the circuit is scored - /// - /// ## param - /// circuit parameters - /// - /// ## gateStructure - /// gate structure in flat representation - /// - /// ## bias - /// prediction bias to be tested - /// - /// ## measCount - /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) - /// - /// # Output - /// schedule of indices of misclassified samples - /// - operation MisclassificationsAsScheduleAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int[][] - { - mutable misses = new Int[][0]; - let samples = unFlattenLabeledSamples(vectors,labels); - let gates = unFlattenGateSequence(gateStructure); - let sched = unFlattenSchedule(schedule); - - let pls = ClassificationTripletsClassicalData(samples,sched,param,gates,measCount); - mutable tmp = new (Double,Int)[Length(pls)]; - for (it in 0..(Length(tmp)-1)) - { - let (a,b,c) = pls[it]; - set tmp w/=it<-(b,c); - } - 
let biasCurrent = adjustBias(tmp, bias, 0.01, 10); - for (pl in pls) - { - let (ix,pp,lb) = pl; - if (pp+biasCurrent>0.5) - { - if (lb <1) - { - //Misclassification - set misses=misses + [[ix,1,ix]]; - } - } - else - { - if (lb>0) - { - //Misclassification - set misses=misses + [[ix,1,ix]]; - } - } - } - return misses; - } - - - /// # Summary - /// C#-friendly adapter to misclassification tally - /// - /// # Input - /// ## vectors - /// data vectors in flat encoding - /// - /// ## labels - /// array of corresponding class lables - /// - /// ## schedule - /// flat representation of index subset on which the circuit is scored - /// - /// ## param - /// circuit parameters - /// - /// ## gateStructure - /// gate structure in flat representation - /// - /// ## bias - /// prediction bias to be tested - /// - /// ## measCount - /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) - /// - /// # Output - /// schedule of indices of misclassified samples - /// - operation TestMisclassificationsAsScheduleAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int[][] - { - mutable misses = new Int[][0]; - let samples = unFlattenLabeledSamples(vectors,labels); - let gates = unFlattenGateSequence(gateStructure); - let sched = unFlattenSchedule(schedule); - - let pls = ClassificationTripletsClassicalData(samples,sched,param,gates,measCount); - mutable tmp = new (Double,Int)[Length(pls)]; - for (it in 0..(Length(tmp)-1)) - { - let (a,b,c) = pls[it]; - set tmp w/=it<-(b,c); - } - let biasCurrent = bias; - for (pl in pls) - { - let (ix,pp,lb) = pl; - if (pp+biasCurrent>0.5) - { - if (lb <1) - { - //Misclassification - set misses=misses + [[ix,1,ix]]; - } - } - else - { - if (lb>0) - { - //Misclassification - set misses=misses + [[ix,1,ix]]; - } - } - } - return misses; - } - - - - /// # Summary - /// C#-friendly adapter to misclassification tally - /// - /// # Input - /// ## vectors - /// data vectors in flat encoding - /// - /// ## labels - /// array of corresponding class lables - /// - /// ## schedule - /// flat representation of index subset on which the circuit is scored - /// - /// ## param - /// circuit parameters - /// - /// ## gateStructure - /// gate structure in flat representation - /// - /// ## bias - /// prediction bias to be tested - /// - /// ## measCount - /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) - /// - /// # Output - /// (bias, schedule of indices of misclassified samples) - /// - operation BiasAndMisclassificationsAsScheduleAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : (Double,Int[][]) - { - mutable misses = new Int[][0]; - let samples = unFlattenLabeledSamples(vectors,labels); - let gates = unFlattenGateSequence(gateStructure); - let sched = unFlattenSchedule(schedule); - - let pls = ClassificationTripletsClassicalData(samples,sched,param,gates,measCount); - mutable tmp = new (Double,Int)[Length(pls)]; - for (it in 0..(Length(tmp)-1)) - { - let (a,b,c) = pls[it]; - set tmp w/=it<-(b,c); - } - let biasCurrent = adjustBias(tmp, bias, 0.01, 10); - for (pl in pls) - { - let (ix,pp,lb) = pl; - if (pp+biasCurrent>0.5) - { - if (lb <1) - { - //Misclassification - set misses=misses + [[ix,1,ix]]; - } - } - else - { - if (lb>0) - { - //Misclassification - set misses=misses + [[ix,1,ix]]; - } - } - } - return 
(biasCurrent, misses); - } - - - /// # Summary - /// Semi-greedily find a bias value that leads to near-minimum misclassification score - /// - operation recomputeBias(probabilities: Double[], labels: Int[], sched: SamplingSchedule, bias: Double, tolerance: Double, maxIter: Int) : Double - { - mutable min1 = 1.0; - mutable max0 = 0.0; - mutable ipro = 0; - for (rg in sched!) - { - for(ix in rg) - { - let prob = probabilities[ipro]; - let lab = labels[ix]; - if (lab > 0) - { - if (min1 > prob) - { - set min1 = prob; - } - } - else - { - if (max0 < prob) - { - set max0 = prob; - } - } - set ipro = ipro +1 ; - } - } //rof - if (max0 <= min1) - { - return 0.5*(1.0-max0-min1); //Gives a perfect classification - } - mutable mBest = Length(probabilities); - mutable bBest = bias; - mutable bLeft = 0.5-max0; - mutable bRight = 0.5-min1; - mutable bestDir = 0; - mutable proposedLabels = InferredLabels(probabilities,bLeft); - mutable mLeft = NMismatches(proposedLabels, labels, sched); - if (mLeft < mBest) - { - set bBest = bLeft; - set mBest = mLeft; - set bestDir = -1; - } - set proposedLabels = InferredLabels(probabilities,bRight); - mutable mRight = NMismatches(proposedLabels, labels, sched); - if (mRight < mBest) - { - set bBest = bRight; - set mBest = mRight; - set bestDir = 1; - } - - for (iter in 1..maxIter) - { - if ((bRight - bLeft) < tolerance) - { - return bBest; - } - let bMiddle = 0.5*(bLeft+bRight); - set proposedLabels = InferredLabels(probabilities,bMiddle); - let mMiddle = NMismatches(proposedLabels, labels, sched); - - if (mMiddle < mLeft) - { - if (bestDir > 0) //replace the weaker end - { - set bLeft = bMiddle; - set mLeft = mMiddle; - - if (mMiddle < mBest) - { - set bBest = bMiddle; - set mBest = mMiddle; - set bestDir = -1; //note that the left end is now better - } - } - else //right end was the weaker end - { - set bRight = bMiddle; - set mRight = mMiddle; - if (mMiddle < mBest) - { - set bBest = bMiddle; - set mBest = mMiddle; - set bestDir = 1; //note that the right end is now better - } - } - //Done with the left end - } - else - { - - if (mMiddle < mRight) - { - //We are better than the right but worse than the left - //Hence the right must be weaker - set bRight = bMiddle; - set mRight = mMiddle; - } - else - { - return bBest; //cannot continue the greedy search - } - } - - } - return bias; - } //recomputeBias - /// # Summary /// Semi-greedily find a bias value that leads to near-minimum misclassification score /// @@ -1142,12 +533,5 @@ namespace Microsoft.Quantum.MachineLearning { return ret; } - function InferredLabels(probabilities: Double[], bias: Double): Int[] { - mutable ret = new Int[Length(probabilities)]; - for (il in 0..(Length(probabilities) - 1)) { - set ret w/= il <- probabilities[il] + bias > 0.5 ? 
1 | 0; - } - return ret; - } } diff --git a/MachineLearning/src/Runtime/Classification.qs b/MachineLearning/src/Runtime/Classification.qs index b254a7fc1d4..65c3e67293e 100644 --- a/MachineLearning/src/Runtime/Classification.qs +++ b/MachineLearning/src/Runtime/Classification.qs @@ -1,35 +1,114 @@ namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Characterization; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Convert; + + operation EstimateClassificationProbabilityFromEncodedSample( + encodedSample : StateGenerator, + parameters: Double[], + gates: GateSequence, nMeasurements : Int + ) + : Double { + return 1.0 - EstimateFrequencyA( + endToEndPreparation(encodedSample::Apply, parameters,gates), + measureLastQubit(encodedSample::NQubits), + encodedSample::NQubits, + nMeasurements + ); + } + + operation EstimateClassificationProbabilityFromSample(tolerance: Double, parameters : Double[], gates: GateSequence, sample: Double[], nMeasurements: Int) + : Double { + let nQubits = FeatureRegisterSize(sample); + let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + return EstimateClassificationProbabilityFromEncodedSample( + StateGenerator(nQubits, circEnc), parameters, gates, nMeasurements + ); + + } - /// # Summary - /// Using a flat description of a classification model, assign estimated probability of the top class label - /// to each vector in the test set + /// Given a of classification probability and a bias, returns the + /// label inferred from that probability. /// /// # Input + /// ## bias + /// The bias between two classes, typically the result of training a + /// classifier. + /// ## probability + /// A classification probabilities for a particular sample, typicaly + /// resulting from estimating its classification frequency. + /// + /// # Output + /// The label inferred from the given classification probability. + function InferredLabel(bias : Double, probability : Double) : Int { + return probability + bias > 0.5 ? 1 | 0; + } + + /// # Summary + /// Given an array of classification probabilities and a bias, returns the + /// label inferred from each probability. + /// + /// # Input + /// ## bias + /// The bias between two classes, typically the result of training a + /// classifier. + /// ## probabilities + /// An array of classification probabilities for a set of samples, typicaly + /// resulting from estimating classification frequencies. + /// + /// # Output + /// The label inferred from each classification probability. + function InferredLabels(bias : Double, probabilities : Double[]): Int[] { + return Mapped(InferredLabel(bias, _), probabilities); + } + + /// # Summary + /// Estimates all classification probabilities for a given dataset. + /// + /// # Input + /// ## samples + /// a container of labeled samples + /// + /// ## sched + /// a schedule to define a subset of samples + /// /// ## nQubits - /// the number of qubits used for data encoding + /// number of qubits in the classification circuit /// /// ## gates - /// flat characterization of circuit structure. 
Each element is [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] + /// the sequence of gates in the circuit /// - /// ## parameters - /// an array of circuit parameters - /// - /// ## testSet - /// the set of vectors to be labeled + /// ## param + /// parameters of the circuits /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability + /// ## measCount /// /// # Output - /// Array of estimated probabilities of top class label (for each sample in the test set) + /// array of corresponding estimated probabilities of the top class label /// - operation EstimateClassificationProbabilities(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], testSet: Double[][], nMeasurements: Int) : Double[] - { - let segSched = [0..1..Length(testSet)-1]; - return EstimateClassificationProbabilitiesClassicalData(tolerance, testSet, SamplingSchedule(segSched), nQubits, unFlattenGateSequence(gates), parameters, nMeasurements); + operation EstimateClassificationProbabilitiesClassicalData( + tolerance : Double, samples : Double[][], sched : SamplingSchedule, + nQubits : Int, gates : GateSequence, param : Double[], + nMeasurements : Int + ) : Double[] { + let effectiveTolerance = tolerance / IntAsDouble(Length(gates!)); + mutable ret = new Double[0]; + for (rg in sched!) { + for (ix in rg) { + let samp = samples[ix]; + set ret += [EstimateClassificationProbabilityFromEncodedSample( + StateGenerator(nQubits, NoisyInputEncoder(effectiveTolerance, samp)), + param, gates, nMeasurements + )]; + } + } + + return ret; } /// # Summary @@ -47,7 +126,7 @@ namespace Microsoft.Quantum.MachineLearning { /// ## parameters /// an array of circuit parameters /// - /// ## testSet + /// ## samples /// the set of vectors to be labeled /// /// ## bias @@ -59,11 +138,13 @@ namespace Microsoft.Quantum.MachineLearning { /// # Output /// Array of predicted class labels for each sample of the test set /// - operation DoClassification(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], bias: Double, testSet: Double[][], nMeasurements: Int) : Int[] - { - let probs = EstimateClassificationProbabilities(tolerance, nQubits,gates,parameters,testSet,nMeasurements); - return InferredLabels(probs, bias); + operation DoClassification(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], bias: Double, samples : Double[][], nMeasurements: Int) : Int[] { + let schedule = SamplingSchedule([0..Length(samples) - 1]); + let sequence = unFlattenGateSequence(gates); + let probs = EstimateClassificationProbabilitiesClassicalData( + tolerance, samples, schedule, nQubits, sequence, parameters, nMeasurements + ); + return InferredLabels(bias, probs); } - } diff --git a/MachineLearning/src/Runtime/InputEncoding.qs b/MachineLearning/src/Runtime/InputEncoding.qs index d7f147e1902..cf243f2a3c2 100644 --- a/MachineLearning/src/Runtime/InputEncoding.qs +++ b/MachineLearning/src/Runtime/InputEncoding.qs @@ -25,9 +25,11 @@ namespace Microsoft.Quantum.MachineLearning { function _Unnegate(negLocs: Int[], coefficients : ComplexPolar[]) : ComplexPolar[] { mutable ret = coefficients; for (idxNegative in negLocs) { - let jx = negLocs[idxNegative]; - let coefficient = coefficients[jx]; - set ret w/= jx <- ComplexPolar(coefficient::Magnitude, 0.0); + if (idxNegative >= Length(coefficients)) { + fail $"Cannot set the phase at index {idxNegative}, only {Length(coefficients)} coefficients were provided."; + } + let coefficient = 
coefficients[idxNegative]; + set ret w/= idxNegative <- ComplexPolar(coefficient::Magnitude, 0.0); } return ret; } diff --git a/MachineLearning/src/Runtime/Training.qs b/MachineLearning/src/Runtime/Training.qs index be3c4cc5abe..dc61875bd55 100644 --- a/MachineLearning/src/Runtime/Training.qs +++ b/MachineLearning/src/Runtime/Training.qs @@ -6,6 +6,101 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; + /// # Summary + /// Returns a bias value that leads to near-minimum misclassification score. + /// + /// # Remarks + /// Note that `probabilities` and `labels` will not in general have the same + /// length, as `labels` is indexed by a training set index while `probabilities` + /// is indexed by the given sampling schedule. + function _UpdatedBias(probabilities: Double[], labels: Int[], sched: SamplingSchedule, bias: Double, tolerance: Double, maxIter: Int) : Double { + mutable min1 = 1.0; + mutable max0 = 0.0; + mutable ipro = 0; + for (rg in sched!) { + for (ix in rg) { + let prob = probabilities[ipro]; + let lab = labels[ix]; + if (lab > 0) { + if (min1 > prob) { + set min1 = prob; + } + } else { + if (max0 < prob) { + set max0 = prob; + } + } + set ipro += 1; + } + } + // Exit early if we can find a perfect classification. + if (max0 <= min1) { + return 0.5 * (1.0 - max0 - min1); + } + mutable mBest = Length(probabilities); + mutable bBest = bias; + mutable bLeft = 0.5 - max0; + mutable bRight = 0.5 - min1; + mutable bestDir = 0; + mutable proposedLabels = InferredLabels(bLeft, probabilities); + mutable mLeft = NMismatches(proposedLabels, labels, sched); + if (mLeft < mBest) { + set bBest = bLeft; + set mBest = mLeft; + set bestDir = -1; + } + set proposedLabels = InferredLabels(bRight, probabilities); + mutable mRight = NMismatches(proposedLabels, labels, sched); + if (mRight < mBest) { + set bBest = bRight; + set mBest = mRight; + set bestDir = 1; + } + + for (iter in 1..maxIter) { + if ((bRight - bLeft) < tolerance) + { + return bBest; + } + let bMiddle = 0.5 * (bLeft+bRight); + set proposedLabels = InferredLabels(bMiddle, probabilities); + let mMiddle = NMismatches(proposedLabels, labels, sched); + + if (mMiddle < mLeft) { + if (bestDir > 0) { //replace the weaker end + set bLeft = bMiddle; + set mLeft = mMiddle; + + if (mMiddle < mBest) { + set bBest = bMiddle; + set mBest = mMiddle; + set bestDir = -1; //note that the left end is now better + } + } else { //right end was the weaker end + set bRight = bMiddle; + set mRight = mMiddle; + if (mMiddle < mBest) { + set bBest = bMiddle; + set mBest = mMiddle; + set bestDir = 1; //note that the right end is now better + } + } + //Done with the left end + } else { + if (mMiddle < mRight) { + // We are better than the right but worse than the left. + // Hence the right must be weaker. 
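                    // Illustrative numbers for this branch (not from any dataset): if the
                    // left endpoint scores mLeft = 7 mismatches, the midpoint scores
                    // mMiddle = 9, and the right endpoint scores mRight = 12, then the
                    // midpoint is worse than the left but still better than the right,
                    // so the search keeps the better half [bLeft, bMiddle] by pulling
                    // the right endpoint inward on the next two assignments.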
+ set bRight = bMiddle; + set mRight = mMiddle; + } else { + return bBest; //cannot continue the greedy search + } + } + + } + return bias; + } //recomputeBias + operation TrainSequentialClassifier( nQubits: Int, gates: GateSequence, @@ -27,7 +122,7 @@ namespace Microsoft.Quantum.MachineLearning { let labels = Mapped(_Label, samples); let cTechnicalIter = 10; //10 iterations are sufficient for bias adjustment in most cases - for (idxStart in 0..(Length(parameterSource)-1)) { + for (idxStart in 0..(Length(parameterSource) - 1)) { Message($"Beginning training at start point #{idxStart}..."); let ((h, m), (b, parpar)) = StochasticTrainingLoop( samples, trainingSchedule, trainingSchedule, 1, miniBatchSize, @@ -39,7 +134,7 @@ namespace Microsoft.Quantum.MachineLearning { gates, parpar, nMeasurements ); //Estimate bias here! - let localBias = recomputeBias( + let localBias = _UpdatedBias( probsValidation, labels, validationSchedule, @@ -47,7 +142,7 @@ namespace Microsoft.Quantum.MachineLearning { tolerance, cTechnicalIter ); - let localPL = InferredLabels(probsValidation,localBias); + let localPL = InferredLabels(localBias, probsValidation); let localMisses = NMismatches(localPL, labels, validationSchedule); if (bestValidation > localMisses) { set bestValidation = localMisses; diff --git a/MachineLearning/src/Runtime/Types.qs b/MachineLearning/src/Runtime/Types.qs index 586f014cbe1..7b0d668294e 100644 --- a/MachineLearning/src/Runtime/Types.qs +++ b/MachineLearning/src/Runtime/Types.qs @@ -45,6 +45,25 @@ namespace Microsoft.Quantum.MachineLearning { /// Abstraction for a two-level range of indices newtype SamplingSchedule = Range[]; + /// # Summary + /// Returns the number of elements in a given sampling schedule. + /// + /// # Input + /// ## schedule + /// A sampling schedule whose length is to be returned. + /// + /// # Output + /// The number of elements in the given sampling schedule. + function ScheduleLength(schedule : SamplingSchedule) : Int { + mutable length = 0; + for (range in schedule!) { + for (index in range) { + set length += 1; + } + } + return length; + } + newtype ValidationResults = ( NMisclassifications: Int ); diff --git a/MachineLearning/src/Runtime/Validation.qs b/MachineLearning/src/Runtime/Validation.qs index 49b64e9c69d..bd0f50dff7d 100644 --- a/MachineLearning/src/Runtime/Validation.qs +++ b/MachineLearning/src/Runtime/Validation.qs @@ -66,7 +66,7 @@ namespace Microsoft.Quantum.MachineLearning { let features = Mapped(_Features, samples); let labels = Mapped(_Label, samples); let probsValidation = EstimateClassificationProbabilitiesClassicalData(tolerance, features, validationSchedule, nQubits, gates, parameters, nMeasurements); - let localPL = InferredLabels(probsValidation, bias); + let localPL = InferredLabels(bias, probsValidation); let nMismatches = NMismatches(localPL, labels, validationSchedule); return ValidationResults( nMismatches From cae0050a860fb5d4964f2814b8a1300b3d16ba2f Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 4 Dec 2019 10:41:03 -0800 Subject: [PATCH 06/43] Bounded univariate minimization (#183) * Added comments * Test case for new univariate optimizer. * Update to latest alpha. * Fix order of interval edges. * Fix parabola test. * Use new minimization to simplify training logic. * Used new optimization function to simplify bias search. * Revert to public alphas. * Temporarily revert use of Test attribute. 
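The changes below replace the hand-rolled bias bisection with a bounded univariate minimizer, LocalUnivariateMinimum, added under Standard/src/Optimization/Univariate.qs. The body of that new function is not shown in this patch excerpt, so the following Q# sketch only illustrates one standard way such a bounded minimizer can be implemented (golden-section search over a bracketing interval); the namespace, function name, and tuple return shape are assumptions made for illustration and need not match the shipped implementation, which the diff shows returning a value accessed via ::Coordinate.

namespace Microsoft.Quantum.MachineLearning.Sketches {
    open Microsoft.Quantum.Math;

    /// Hypothetical sketch of a bounded univariate minimizer: returns an
    /// approximate (coordinate, value) pair for a minimum of fn on [left, right].
    function GoldenSectionMinimumSketch(
        fn : (Double -> Double),
        left : Double,
        right : Double,
        tolerance : Double
    ) : (Double, Double) {
        // Inverse golden ratio (about 0.618); the bracket shrinks by this factor per step.
        let shrink = 0.5 * (Sqrt(5.0) - 1.0);
        mutable lo = left;
        mutable hi = right;
        mutable probe1 = hi - shrink * (hi - lo);
        mutable probe2 = lo + shrink * (hi - lo);
        mutable value1 = fn(probe1);
        mutable value2 = fn(probe2);
        // 100 iterations is ample for any reasonable tolerance.
        for (idxIteration in 1..100) {
            if (hi - lo < tolerance) {
                let coordinate = 0.5 * (lo + hi);
                return (coordinate, fn(coordinate));
            }
            if (value1 < value2) {
                // The minimum lies in [lo, probe2]; reuse probe1 as the new probe2.
                set hi = probe2;
                set probe2 = probe1;
                set value2 = value1;
                set probe1 = hi - shrink * (hi - lo);
                set value1 = fn(probe1);
            } else {
                // The minimum lies in [probe1, hi]; reuse probe2 as the new probe1.
                set lo = probe1;
                set probe1 = probe2;
                set value1 = value2;
                set probe2 = lo + shrink * (hi - lo);
                set value2 = fn(probe2);
            }
        }
        let coordinate = 0.5 * (lo + hi);
        return (coordinate, fn(coordinate));
    }
}

Seeded with the bracket (0.5 - max0, 0.5 - min1) already used by the bias search, a minimizer of this kind converges to a near-optimal bias without the bestDir bookkeeping of the earlier code.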
--- Chemistry/src/DataModel/DataModel.csproj | 4 +- Chemistry/src/Jupyter/Jupyter.csproj | 6 +- Chemistry/src/Runtime/Runtime.csproj | 4 +- .../tests/ChemistryTests/QSharpTests.csproj | 6 +- .../tests/DataModelTests/CSharpTests.csproj | 6 +- .../tests/SamplesTests/SamplesTests.csproj | 6 +- .../tests/SystemTests/SystemTests.csproj | 6 +- .../src/DataModel/DataModel.csproj | 2 +- MachineLearning/src/Runtime/Circuits.qs | 883 +++++++---------- MachineLearning/src/Runtime/Runtime.csproj | 2 +- MachineLearning/src/Runtime/Training.qs | 936 ++++++++---------- MachineLearning/src/Runtime/Types.qs | 113 ++- MachineLearning/src/Runtime/Validation.qs | 156 +-- .../tests/MachineLearningTests.csproj | 4 +- Numerics/src/Numerics.csproj | 4 +- Numerics/tests/NumericsTests.csproj | 6 +- .../Optimization/Properties/NamespaceInfo.qs | 6 + Standard/src/Optimization/Univariate.qs | 84 ++ Standard/src/Standard.csproj | 4 +- .../tests/Optimization/UnivariateTests.qs | 20 + Standard/tests/Standard.Tests.csproj | 6 +- 21 files changed, 1102 insertions(+), 1162 deletions(-) create mode 100644 Standard/src/Optimization/Properties/NamespaceInfo.qs create mode 100644 Standard/src/Optimization/Univariate.qs create mode 100644 Standard/tests/Optimization/UnivariateTests.qs diff --git a/Chemistry/src/DataModel/DataModel.csproj b/Chemistry/src/DataModel/DataModel.csproj index 14086f84be6..4e1f891c5b3 100644 --- a/Chemistry/src/DataModel/DataModel.csproj +++ b/Chemistry/src/DataModel/DataModel.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 @@ -35,7 +35,7 @@ - + diff --git a/Chemistry/src/Jupyter/Jupyter.csproj b/Chemistry/src/Jupyter/Jupyter.csproj index 57c676d8836..05d9aa2329a 100644 --- a/Chemistry/src/Jupyter/Jupyter.csproj +++ b/Chemistry/src/Jupyter/Jupyter.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 x64 @@ -26,8 +26,8 @@ - - + + diff --git a/Chemistry/src/Runtime/Runtime.csproj b/Chemistry/src/Runtime/Runtime.csproj index 442a13d14c9..e95a5c76452 100644 --- a/Chemistry/src/Runtime/Runtime.csproj +++ b/Chemistry/src/Runtime/Runtime.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 Microsoft.Quantum.Chemistry.Runtime @@ -15,7 +15,7 @@ - + diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj index 66e2888e2c0..96c76d44343 100644 --- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj +++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.0 x64 @@ -11,8 +11,8 @@ - - + + diff --git a/Chemistry/tests/DataModelTests/CSharpTests.csproj b/Chemistry/tests/DataModelTests/CSharpTests.csproj index 9c3df4a3cce..754a219c1fc 100644 --- a/Chemistry/tests/DataModelTests/CSharpTests.csproj +++ b/Chemistry/tests/DataModelTests/CSharpTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.0 @@ -24,8 +24,8 @@ - - + + diff --git a/Chemistry/tests/SamplesTests/SamplesTests.csproj b/Chemistry/tests/SamplesTests/SamplesTests.csproj index af9581bbb83..206c9c3b679 100644 --- a/Chemistry/tests/SamplesTests/SamplesTests.csproj +++ b/Chemistry/tests/SamplesTests/SamplesTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.0 @@ -18,8 +18,8 @@ - - + + diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj index 0da8e8b4d71..8bcec6ef3cd 100644 --- a/Chemistry/tests/SystemTests/SystemTests.csproj +++ b/Chemistry/tests/SystemTests/SystemTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.0 @@ -18,8 +18,8 @@ - - + + diff --git a/MachineLearning/src/DataModel/DataModel.csproj b/MachineLearning/src/DataModel/DataModel.csproj index 
1d1f8447711..fc1d86492a9 100644 --- a/MachineLearning/src/DataModel/DataModel.csproj +++ b/MachineLearning/src/DataModel/DataModel.csproj @@ -32,7 +32,7 @@ - + diff --git a/MachineLearning/src/Runtime/Circuits.qs b/MachineLearning/src/Runtime/Circuits.qs index 51bbd02a70e..49df95cd18e 100644 --- a/MachineLearning/src/Runtime/Circuits.qs +++ b/MachineLearning/src/Runtime/Circuits.qs @@ -2,536 +2,385 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Diagnostics; - open Microsoft.Quantum.Preparation; - open Microsoft.Quantum.Characterization; - - /// WARNING: the downstream EstimateFrequencyA counts the frequency of Zero - - operation measureLastQubit(nQubits : Int): (Qubit[] => Result) { - let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; - return Measure(paulis, _); - } - - operation _endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, reg: Qubit[]): Unit is Adj - { - enc(LittleEndian(reg)); - _ApplyGates(parameters, gates, reg); - } - - operation endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence) : (Qubit[] => Unit is Adj) - { - return _endToEndPreparation(enc,parameters, gates, _); - } - - function collectNegativeLocs(cNegative: Int, coefficients : ComplexPolar[]) : Int[] - { - mutable negLocs = ConstantArray(cNegative, -1); - mutable nlx = 0; - for (idx in 0 .. Length(coefficients) - 1) + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Characterization; + + /// WARNING: the downstream EstimateFrequencyA counts the frequency of Zero + + operation measureLastQubit(nQubits : Int): (Qubit[] => Result) { + let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; + return Measure(paulis, _); + } + + operation _endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, reg: Qubit[]): Unit is Adj + { + enc(LittleEndian(reg)); + _ApplyGates(parameters, gates, reg); + } + + operation endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence) : (Qubit[] => Unit is Adj) + { + return _endToEndPreparation(enc,parameters, gates, _); + } + + function collectNegativeLocs(cNegative: Int, coefficients : ComplexPolar[]) : Int[] + { + mutable negLocs = ConstantArray(cNegative, -1); + mutable nlx = 0; + for (idx in 0 .. Length(coefficients) - 1) { - let (r,a) = (coefficients[idx])!; - if (AbsD(a - PI()) < 1E-9) { - if (nlx < cNegative) - { - set negLocs w/= nlx <- idx; - set nlx = nlx+1; - } - } + let (r,a) = (coefficients[idx])!; + if (AbsD(a - PI()) < 1E-9) { + if (nlx < cNegative) + { + set negLocs w/= nlx <- idx; + set nlx = nlx+1; + } + } } - return negLocs; - } //collectNegativeLocs + return negLocs; + } //collectNegativeLocs - // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. 
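    // Reading of the circuit below: the auxiliary (last) qubit is prepared in |+>,
    // the first parametrized circuit g1 is applied to the data register on its |1>
    // branch, the second circuit g2 followed by Z on the last data qubit is applied
    // on its |0> branch, and a closing H is applied to the auxiliary. The probability
    // of then measuring the auxiliary in One is (1 - Re<psi| g2^dag (Z on the last
    // data qubit) g1 |psi>) / 2, which is the quantity HardamardTestPhysical below
    // effectively returns, since it subtracts the estimated Zero frequency from one.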
- operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { + // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. + operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { let L = Length(reg) - 1; let g1 = _ApplyGates(param1,gates1,_); let g2 = _ApplyGates(param2,gates2,_); enc(LittleEndian(reg[0..(L-1)])); - within { - H(Tail(reg)); - } apply { - (Controlled g1) ([reg[L]], reg[0..(L-1)]); - within { - X(Tail(reg)); - } apply { - (Controlled g2) ([reg[L]], reg[0..(L-1)]); - (Controlled Z) ([reg[L]], reg[(L-1)]); - } - } + within { + H(Tail(reg)); + } apply { + (Controlled g1) ([reg[L]], reg[0..(L-1)]); + within { + X(Tail(reg)); + } apply { + (Controlled g2) ([reg[L]], reg[0..(L-1)]); + (Controlled Z) ([reg[L]], reg[(L-1)]); + } + } + } + + operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { + return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); } - operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { - return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); - } - - operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double - { - return 1.0-EstimateFrequencyA(endToEndHTcircuit(enc2,param1,gates1,param2,gates2),measureLastQubit(nQubits), nQubits, nMeasurements); - } - - - - /// # Summary - /// polymorphic classical/quantum gradient estimator - /// - /// # Input - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. - /// IMPORTANT: measCount==0 implies simulator deployment - /// - /// # Output - /// the gradient - /// - operation EstimateGradient(param : Double[], gates: GateSequence, sg: StateGenerator, nMeasurements : Int) : (Double[]) { - //Synopsis: Suppose (param,gates) define Circ0 - //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 - //The expectation derivative is then 2 Re[] = - // Re[] - Re[] - //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 - //Thus we are left to compute Re[] = - // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> - //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) - - - //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) - //and we want a unitary description of its \theta-derivative. It can be written as - // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} - let pC = Length(param); - mutable grad = ConstantArray(pC, 0.0); - mutable paramShift = param + [0.0]; - let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); - - for (gate in gates!) 
{ - set paramShift w/= gate::Index <- (param[gate::Index] + PI()); //Shift the corresponding parameter - // NB: This the *antiderivative* of the bracket - let newDer = 2.0 * HardamardTestPhysical( - sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements - ) - 1.0; - if (IsEmpty(gate::Span::ControlIndices)) { - //uncontrolled gate - set grad w/= gate::Index <- grad[gate::Index] + newDer; - } else { - //controlled gate - set paramShift w/=gate::Index<-(param[gate::Index]+3.0 * PI()); - //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) - // NB: This the *antiderivative* of the bracket - let newDer1 = 2.0 * HardamardTestPhysical( - sg::Apply, param, gates, paramShift, gates, nQubits + 1, - nMeasurements - ) - 1.0; - set grad w/= gate::Index <- (grad[gate::Index] + 0.5* (newDer - newDer1)); - set paramShift w/= gate::Index <-( param[gate::Index] + PI()); //unshift by 2 Pi (for debugging purposes) - } - set paramShift w/= gate::Index <- param[gate::Index]; //unshift this parameter - } - return grad; - - } //GradientHack - - - /// # Summary - /// computes stochastic gradient on one classical sample - /// - /// # Input - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## sample - /// sample vector as a raw array - /// - /// ## nMeasurements - /// number of true quantum measurements to estimate probabilities - /// - /// # Output - /// the gradient - /// - operation EstimateGradientFromClassicalSample(tolerance: Double, param : Double[], gates: GateSequence, sample: Double[], nMeasurements : Int) : (Double[]) { - let nQubits = MaxI(FeatureRegisterSize(sample), NQubitsRequired(gates)); - let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); - let sg = StateGenerator(nQubits, circEnc); - return EstimateGradient(param, gates, sg, nMeasurements); - } - - //Csharp-frendly adapter for gradient estimation - //'gates' is a array of "flattened" controlled rotation defitions - //each such definition is Int[no.controls+3] in the format [parameter index, Pauli index, target index <,control qubit indices>] - //Pauli index is: 0 for I, 1 for X, 2 for y, 3 for Z - //target index is the index of the target qubit of the rotation - //Sequence of can be empty for uncontroled - operation GradientClassicalSimulationAdapter(tolerance: Double, param : Double[], gates: Int[][], sample: Double[]) : (Double[]) - { - - return EstimateGradientFromClassicalSample(tolerance, param,unFlattenGateSequence(gates),sample,0); - - } - - /// # Summary - /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. THIS operation is IN DEPRECATION - /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## param - /// parameters of the circuits - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## nMeasurements - /// the maximum number of quantum measurements used in the probability estimation - /// - /// # Output - /// TODO - operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, nMeasurements: Int): - (Double,Int)[] { - mutable N = IsEmpty(samples) - ? NQubitsRequired(gates) - | MaxI(NQubitsRequired(gates), FeatureRegisterSize(_Features(Head(samples)))); - mutable ret = new (Double, Int)[0]; - for (rg in sched!) 
{ - for (ix in rg) { - let sample = samples[ix]; - //agnostic w.r.t. simulator (may still be simulable) - let prob1 = EstimateClassificationProbabilityFromSample(1E-12, param, gates, sample::Features, nMeasurements); - set ret += [(prob1, sample::Label)]; - } - } - - return ret; - } - - operation EstimateClassificationProbabilitiesClassicalDataAdapter(tolerance: Double, samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] - { - return EstimateClassificationProbabilitiesClassicalData(tolerance, samples, unFlattenSchedule(schedule), nQubits, unFlattenGateSequence(gates), param, measCount); - } - - /// # Summary - /// tallies hits and misses off a list of probability estimates - /// - /// # Input - /// ## pls - /// a list of estimated probabilities with the corresponding class labels - /// - /// ## bias - /// bias on record - /// - /// # Output - /// (no.hits, no.misses) pair - /// - function TallyHitsMisses(pls: (Double, Int)[], bias: Double) : (Int, Int) { - mutable hits = 0; - mutable misses = 0; - for ((classificationProbability, label) in pls) { - if (label == InferredLabel(bias, classificationProbability)) { - set hits += 1; - } else { - set misses += 1; - } - } - return (hits, misses); - } - - /// # Summary - /// generate a flat list of sample indices where mispredictions occur - /// - /// # Input - /// ## sched - /// a sampling schedule - /// - /// ## pls - /// a list of estimated probabilities with the corresponding class labels - /// - /// ## bias - /// bias on record - /// - /// # Output - /// the list of indices where mispredictions occur - /// - function MissLocations(sched : SamplingSchedule, pls : (Double, Int)[], bias: Double) : Int[] { - mutable ret = new Int[0]; - mutable ir = 0; - - for (rg in sched!) 
{ - for (ix in rg) { - let (prob1, lab) = pls[ir]; - set ir += 1; - if (prob1 + bias > 0.5) { - if (lab < 1) { - set ret += [ix]; - } - } else { - if (lab > 0) { - set ret += [ix]; - } - } - } - } - return ret; - } - - /// # Summary - /// C#-friendly adapter to misclassification tally - /// - /// # Input - /// ## vectors - /// data vectors in flat encoding - /// - /// ## labels - /// array of corresponding class lables - /// - /// ## schedule - /// flat representation of index subset on which the circuit is scored - /// - /// ## param - /// circuit parameters - /// - /// ## gateStructure - /// gate structure in flat representation - /// - /// ## bias - /// prediction bias to be tested - /// - /// ## measCount - /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) - /// - /// # Output - /// the number of misclassifications - /// - operation MisclassificationScoreAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int { - mutable misses = 0; - let samples = unFlattenLabeledSamples(vectors,labels); - let gates = unFlattenGateSequence(gateStructure); - let sched = unFlattenSchedule(schedule); - - let pls = ClassificationProbabilitiesClassicalData(samples,sched,param,gates,measCount); - let biasCurrent = adjustBias(pls, bias, 0.01, 10); - let (h1,m1) = TallyHitsMisses(pls,biasCurrent); - return m1; - } - - /// # Summary - /// Semi-greedily find a bias value that leads to near-minimum misclassification score - /// - /// # Input - /// ## pls - /// a plist of probability estimates and corresponding labels - /// - /// ## bias - /// a fallback value of bias - /// - /// ## tol - /// acceptable tolerance in the bias estimate - /// - /// ## maxIter - /// maximum number of trial bisections - /// - /// # Output - /// the bias estimate - /// - function adjustBias(pls: (Double,Int)[], bias: Double, tol:Double, maxIter: Int) : Double - { - mutable min1 = 1.0; - mutable max0 = 0.0; - for (pl in pls) - { - if (Snd(pl)>0) - { - if (min1 > Fst(pl)) - { - set min1 = Fst(pl); - } - } - else - { - if (max0 < Fst(pl)) - { - set max0 = Fst(pl); - } - } - } - if (max0 <= min1) - { - return 0.5*(1.0-max0-min1); //Gives a perfect classification - } - mutable hBest = 0; - mutable mBest = Length(pls); - mutable bBest = bias; - mutable bLeft = 0.5-max0; - mutable bRight = 0.5-min1; - mutable bestDir = 0; - mutable (hLeft,mLeft) = TallyHitsMisses(pls,bLeft); - if (mLeft < mBest) - { - set bBest = bLeft; - set hBest = hLeft; - set mBest = mLeft; - set bestDir = -1; - } - mutable (hRight, mRight) = TallyHitsMisses(pls,bRight); - - if (mRight < mBest) - { - set bBest = bRight; - set hBest = hRight; - set mBest = mRight; - set bestDir = 1; - } - for (iter in 1..maxIter) - { - if ((bRight - bLeft) 0) //replace the weaker end - { - set bLeft = bMiddle; - set hLeft = hMiddle; - set mLeft = mMiddle; - - if (mMiddle * hBest < hMiddle * mBest) - { - set bBest = bMiddle; - set hBest = hMiddle; - set mBest = mMiddle; - set bestDir = -1; //note that the left end is now better - } - } - else //right end was the weaker end - { - set bRight = bMiddle; - set hRight = hMiddle; - set mRight = mMiddle; - if (mMiddle * hBest < hMiddle * mBest) - { - set bBest = bMiddle; - set hBest = hMiddle; - set mBest = mMiddle; - set bestDir = 1; //note that the right end is now better - } - } - //Done with the left end - } - else - { - if (mMiddle < mRight) - { - //We are better than the right but worse than the left - 
//Hence the right must be weaker - set bRight = bMiddle; - set hRight = hMiddle; - set mRight = mMiddle; - } - else - { - return bBest; //cannot continue the greedy search - } - } - } //rof iter - return bBest; - } //adjust bias - - /// # Summary - /// Extract a mini batch of samples and wrap the batch as a LabeledSampleContainer - /// - /// # Input - /// ## size - /// desired number of samples in the mini batch - /// - /// ## ixLoc - /// starting index for the batch in the list of locations - /// - /// ## locations - /// list of indices of samples of interest - /// - /// ## samples - /// the container to extract the samples from - /// - /// # Output - /// the mini batched wrapped as a LabeledSampleContainer - /// - /// # Remarks - /// the resulting mini batch can be occasionally shorter than the requested 'size' - /// (when it falls on the tail end of the list of 'locations') - /// - function ExtractMiniBatch(size: Int, ixLoc: Int, locations: Int[], samples: LabeledSample[]): LabeledSample[] { - mutable cnt = Length(locations)-ixLoc; - if (cnt > size) - { - set cnt = size; - } - mutable rgSamples = new LabeledSample[0]; - if (cnt > 0) - { - set rgSamples = new LabeledSample[cnt]; - for (isa in 0..(cnt-1)) - { - set rgSamples w/=isa<- samples[locations[ixLoc+isa]]; - } - } - return rgSamples; - } - - /// # Summary - /// (Randomly) inflate of deflate the source number - operation randomize(src : Double, relativeFuzz : Double) : Double { + operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double + { + return 1.0-EstimateFrequencyA(endToEndHTcircuit(enc2,param1,gates1,param2,gates2),measureLastQubit(nQubits), nQubits, nMeasurements); + } + + + + /// # Summary + /// polymorphic classical/quantum gradient estimator + /// + /// # Input + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. + /// IMPORTANT: measCount==0 implies simulator deployment + /// + /// # Output + /// the gradient + /// + operation EstimateGradient(param : Double[], gates: GateSequence, sg: StateGenerator, nMeasurements : Int) : (Double[]) { + //Synopsis: Suppose (param,gates) define Circ0 + //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 + //The expectation derivative is then 2 Re[] = + // Re[] - Re[] + //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 + //Thus we are left to compute Re[] = + // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> + //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) + + + //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) + //and we want a unitary description of its \theta-derivative. It can be written as + // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} + let pC = Length(param); + mutable grad = ConstantArray(pC, 0.0); + mutable paramShift = param + [0.0]; + let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); + + for (gate in gates!) 
{ + set paramShift w/= gate::Index <- (param[gate::Index] + PI()); //Shift the corresponding parameter + // NB: This the *antiderivative* of the bracket + let newDer = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements + ) - 1.0; + if (IsEmpty(gate::Span::ControlIndices)) { + //uncontrolled gate + set grad w/= gate::Index <- grad[gate::Index] + newDer; + } else { + //controlled gate + set paramShift w/=gate::Index<-(param[gate::Index]+3.0 * PI()); + //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) + // NB: This the *antiderivative* of the bracket + let newDer1 = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, paramShift, gates, nQubits + 1, + nMeasurements + ) - 1.0; + set grad w/= gate::Index <- (grad[gate::Index] + 0.5* (newDer - newDer1)); + set paramShift w/= gate::Index <-( param[gate::Index] + PI()); //unshift by 2 Pi (for debugging purposes) + } + set paramShift w/= gate::Index <- param[gate::Index]; //unshift this parameter + } + return grad; + + } //GradientHack + + + /// # Summary + /// computes stochastic gradient on one classical sample + /// + /// # Input + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## sample + /// sample vector as a raw array + /// + /// ## nMeasurements + /// number of true quantum measurements to estimate probabilities + /// + /// # Output + /// the gradient + /// + operation EstimateGradientFromClassicalSample(tolerance: Double, param : Double[], gates: GateSequence, sample: Double[], nMeasurements : Int) : (Double[]) { + let nQubits = MaxI(FeatureRegisterSize(sample), NQubitsRequired(gates)); + let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let sg = StateGenerator(nQubits, circEnc); + return EstimateGradient(param, gates, sg, nMeasurements); + } + + //Csharp-frendly adapter for gradient estimation + //'gates' is a array of "flattened" controlled rotation defitions + //each such definition is Int[no.controls+3] in the format [parameter index, Pauli index, target index <,control qubit indices>] + //Pauli index is: 0 for I, 1 for X, 2 for y, 3 for Z + //target index is the index of the target qubit of the rotation + //Sequence of can be empty for uncontroled + operation GradientClassicalSimulationAdapter(tolerance: Double, param : Double[], gates: Int[][], sample: Double[]) : (Double[]) + { + + return EstimateGradientFromClassicalSample(tolerance, param,unFlattenGateSequence(gates),sample,0); + + } + + /// # Summary + /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. THIS operation is IN DEPRECATION + /// + /// # Input + /// ## samples + /// a container of labeled samples + /// + /// ## sched + /// a schedule to define a subset of samples + /// + /// ## param + /// parameters of the circuits + /// + /// ## gates + /// the sequence of gates in the circuit + /// + /// ## nMeasurements + /// the maximum number of quantum measurements used in the probability estimation + /// + /// # Output + /// TODO + operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, nMeasurements: Int): + (Double,Int)[] { + mutable N = IsEmpty(samples) + ? NQubitsRequired(gates) + | MaxI(NQubitsRequired(gates), FeatureRegisterSize(_Features(Head(samples)))); + mutable ret = new (Double, Int)[0]; + for (rg in sched!) 
{ + for (ix in rg) { + let sample = samples[ix]; + //agnostic w.r.t. simulator (may still be simulable) + let prob1 = EstimateClassificationProbabilityFromSample(1E-12, param, gates, sample::Features, nMeasurements); + set ret += [(prob1, sample::Label)]; + } + } + + return ret; + } + + operation EstimateClassificationProbabilitiesClassicalDataAdapter(tolerance: Double, samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] + { + return EstimateClassificationProbabilitiesClassicalData(tolerance, samples, unFlattenSchedule(schedule), nQubits, unFlattenGateSequence(gates), param, measCount); + } + + + /// # Summary + /// generate a flat list of sample indices where mispredictions occur + /// + /// # Input + /// ## sched + /// a sampling schedule + /// + /// ## pls + /// a list of estimated probabilities with the corresponding class labels + /// + /// ## bias + /// bias on record + /// + /// # Output + /// the list of indices where mispredictions occur + /// + function MissLocations(sched : SamplingSchedule, pls : (Double, Int)[], bias: Double) : Int[] { + mutable ret = new Int[0]; + mutable ir = 0; + + for (rg in sched!) { + for (ix in rg) { + let (prob1, lab) = pls[ir]; + set ir += 1; + if (prob1 + bias > 0.5) { + if (lab < 1) { + set ret += [ix]; + } + } else { + if (lab > 0) { + set ret += [ix]; + } + } + } + } + return ret; + } + + /// # Summary + /// C#-friendly adapter to misclassification tally + /// + /// # Input + /// ## vectors + /// data vectors in flat encoding + /// + /// ## labels + /// array of corresponding class lables + /// + /// ## schedule + /// flat representation of index subset on which the circuit is scored + /// + /// ## param + /// circuit parameters + /// + /// ## gateStructure + /// gate structure in flat representation + /// + /// ## bias + /// prediction bias to be tested + /// + /// ## measCount + /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) + /// + /// # Output + /// the number of misclassifications + /// + operation MisclassificationScoreAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int { + mutable misses = 0; + let samples = unFlattenLabeledSamples(vectors,labels); + let gates = unFlattenGateSequence(gateStructure); + let sched = unFlattenSchedule(schedule); + + let pls = ClassificationProbabilitiesClassicalData(samples,sched,param,gates,measCount); + let biasCurrent = _UpdatedBias(pls, bias, 0.01); + let (h1,m1) = TallyHitsMisses(pls,biasCurrent); + return m1; + } + + /// # Summary + /// Extract a mini batch of samples and wrap the batch as a LabeledSampleContainer + /// + /// # Input + /// ## size + /// desired number of samples in the mini batch + /// + /// ## ixLoc + /// starting index for the batch in the list of locations + /// + /// ## locations + /// list of indices of samples of interest + /// + /// ## samples + /// the container to extract the samples from + /// + /// # Output + /// the mini batched wrapped as a LabeledSampleContainer + /// + /// # Remarks + /// the resulting mini batch can be occasionally shorter than the requested 'size' + /// (when it falls on the tail end of the list of 'locations') + /// + function ExtractMiniBatch(size: Int, ixLoc: Int, locations: Int[], samples: LabeledSample[]): LabeledSample[] { + mutable cnt = Length(locations)-ixLoc; + if (cnt > size) + { + set cnt = size; + } + mutable rgSamples = new LabeledSample[0]; 
+ if (cnt > 0) + { + set rgSamples = new LabeledSample[cnt]; + for (isa in 0..(cnt-1)) + { + set rgSamples w/=isa<- samples[locations[ixLoc+isa]]; + } + } + return rgSamples; + } + + /// # Summary + /// (Randomly) inflate of deflate the source number + operation randomize(src : Double, relativeFuzz : Double) : Double { return src * ( 1.0 + relativeFuzz * (Random([0.5, 0.5]) > 0 ? 1.0 | -1.0) ); - } - - - - /// Summary - /// One possible C#-friendly wrap around the StochasticTrainingLoop - /// - operation StochasticTrainingLoopPlainAdapter(vectors: Double[][], labels: Int[], sched: Int[][], schedScore: Int[][], periodScore: Int, - miniBatchSize: Int, param: Double[],gates: Int[][], bias: Double, lrate: Double, maxEpochs: Int, tol: Double, measCount: Int ) : Double[] // - { - let samples = unFlattenLabeledSamples(vectors,labels); - let sch = unFlattenSchedule(sched); - let schScore = unFlattenSchedule(sched); - let gts = unFlattenGateSequence(gates); - let ((h,m),(b,parpar)) = StochasticTrainingLoop(samples, sch, schScore, periodScore, - miniBatchSize, param, gts, bias, lrate, maxEpochs, tol, measCount); - mutable ret = new Double[Length(parpar)+3]; - set ret w/=0<-IntAsDouble (h); - set ret w/=1<-IntAsDouble (m); - set ret w/=2<-b; - for (j in 0..(Length(parpar)-1)) - { - set ret w/=(j+3)<-parpar[j]; - } - return ret; - } + } + + + + /// Summary + /// One possible C#-friendly wrap around the StochasticTrainingLoop + /// + operation StochasticTrainingLoopPlainAdapter(vectors: Double[][], labels: Int[], sched: Int[][], schedScore: Int[][], periodScore: Int, + miniBatchSize: Int, param: Double[],gates: Int[][], bias: Double, lrate: Double, maxEpochs: Int, tol: Double, measCount: Int ) : Double[] // + { + let samples = unFlattenLabeledSamples(vectors,labels); + let sch = unFlattenSchedule(sched); + let schScore = unFlattenSchedule(sched); + let gts = unFlattenGateSequence(gates); + let ((h,m),(b,parpar)) = StochasticTrainingLoop(samples, sch, schScore, periodScore, + miniBatchSize, param, gts, bias, lrate, maxEpochs, tol, measCount); + mutable ret = new Double[Length(parpar)+3]; + set ret w/=0<-IntAsDouble (h); + set ret w/=1<-IntAsDouble (m); + set ret w/=2<-b; + for (j in 0..(Length(parpar)-1)) + { + set ret w/=(j+3)<-parpar[j]; + } + return ret; + } } diff --git a/MachineLearning/src/Runtime/Runtime.csproj b/MachineLearning/src/Runtime/Runtime.csproj index e9e8700740f..5e1fb341f6c 100644 --- a/MachineLearning/src/Runtime/Runtime.csproj +++ b/MachineLearning/src/Runtime/Runtime.csproj @@ -10,7 +10,7 @@ - + diff --git a/MachineLearning/src/Runtime/Training.qs b/MachineLearning/src/Runtime/Training.qs index dc61875bd55..a6072d499da 100644 --- a/MachineLearning/src/Runtime/Training.qs +++ b/MachineLearning/src/Runtime/Training.qs @@ -5,504 +5,442 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Convert; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - - /// # Summary - /// Returns a bias value that leads to near-minimum misclassification score. - /// - /// # Remarks - /// Note that `probabilities` and `labels` will not in general have the same - /// length, as `labels` is indexed by a training set index while `probabilities` - /// is indexed by the given sampling schedule. - function _UpdatedBias(probabilities: Double[], labels: Int[], sched: SamplingSchedule, bias: Double, tolerance: Double, maxIter: Int) : Double { - mutable min1 = 1.0; - mutable max0 = 0.0; - mutable ipro = 0; - for (rg in sched!) 
{ - for (ix in rg) { - let prob = probabilities[ipro]; - let lab = labels[ix]; - if (lab > 0) { - if (min1 > prob) { - set min1 = prob; - } - } else { - if (max0 < prob) { - set max0 = prob; - } - } - set ipro += 1; - } - } - // Exit early if we can find a perfect classification. - if (max0 <= min1) { - return 0.5 * (1.0 - max0 - min1); - } - mutable mBest = Length(probabilities); - mutable bBest = bias; - mutable bLeft = 0.5 - max0; - mutable bRight = 0.5 - min1; - mutable bestDir = 0; - mutable proposedLabels = InferredLabels(bLeft, probabilities); - mutable mLeft = NMismatches(proposedLabels, labels, sched); - if (mLeft < mBest) { - set bBest = bLeft; - set mBest = mLeft; - set bestDir = -1; - } - set proposedLabels = InferredLabels(bRight, probabilities); - mutable mRight = NMismatches(proposedLabels, labels, sched); - if (mRight < mBest) { - set bBest = bRight; - set mBest = mRight; - set bestDir = 1; - } - - for (iter in 1..maxIter) { - if ((bRight - bLeft) < tolerance) - { - return bBest; - } - let bMiddle = 0.5 * (bLeft+bRight); - set proposedLabels = InferredLabels(bMiddle, probabilities); - let mMiddle = NMismatches(proposedLabels, labels, sched); - - if (mMiddle < mLeft) { - if (bestDir > 0) { //replace the weaker end - set bLeft = bMiddle; - set mLeft = mMiddle; - - if (mMiddle < mBest) { - set bBest = bMiddle; - set mBest = mMiddle; - set bestDir = -1; //note that the left end is now better - } - } else { //right end was the weaker end - set bRight = bMiddle; - set mRight = mMiddle; - if (mMiddle < mBest) { - set bBest = bMiddle; - set mBest = mMiddle; - set bestDir = 1; //note that the right end is now better - } - } - //Done with the left end - } else { - if (mMiddle < mRight) { - // We are better than the right but worse than the left. - // Hence the right must be weaker. - set bRight = bMiddle; - set mRight = mMiddle; - } else { - return bBest; //cannot continue the greedy search - } - } - - } - return bias; - } //recomputeBias - - operation TrainSequentialClassifier( - nQubits: Int, - gates: GateSequence, - parameterSource: Double[][], - samples: LabeledSample[], - trainingSchedule: SamplingSchedule, - validationSchedule: SamplingSchedule, - learningRate: Double, - tolerance: Double, - miniBatchSize: Int, - maxEpochs: Int, - nMeasurements: Int - ) : (Double[], Double) { - mutable retParam = [-1E12]; - mutable retBias = -2.0; //Indicates non-informative start - mutable bestValidation = Length(samples) + 1; - - let features = Mapped(_Features, samples); - let labels = Mapped(_Label, samples); - - let cTechnicalIter = 10; //10 iterations are sufficient for bias adjustment in most cases - for (idxStart in 0..(Length(parameterSource) - 1)) { - Message($"Beginning training at start point #{idxStart}..."); - let ((h, m), (b, parpar)) = StochasticTrainingLoop( - samples, trainingSchedule, trainingSchedule, 1, miniBatchSize, - parameterSource[idxStart], gates, 0.0, learningRate, maxEpochs, - tolerance, nMeasurements - ); - let probsValidation = EstimateClassificationProbabilitiesClassicalData( - tolerance, features, validationSchedule, nQubits, - gates, parpar, nMeasurements - ); - //Estimate bias here! 
- let localBias = _UpdatedBias( - probsValidation, - labels, - validationSchedule, - 0.0, - tolerance, - cTechnicalIter - ); - let localPL = InferredLabels(localBias, probsValidation); - let localMisses = NMismatches(localPL, labels, validationSchedule); - if (bestValidation > localMisses) { - set bestValidation = localMisses; - set retParam = parpar; - set retBias = localBias; - } - - } - return (retParam, retBias); - } - - /// # Summary - /// Using a flat description of a classification model, find a good local optimum - /// for the model parameters and a related calssification bias - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## gates - /// flat characterization of circuit structure. Each element is [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - /// - /// ## parameterSource - /// an array of parameter arrays, to be used as SGD starting points - /// - /// ## trainingSet - /// the set of training samples - /// - /// ## trainingLabels - /// the set of training labels - /// - /// ## trainingSchedule - /// defines a subset of training data actually used in the training process - /// - /// ## validatioSchedule - /// defines a subset of training data used for validation and computation of the *bias* - /// - /// ## learningRate - /// initial learning rate for stochastic gradient descent - /// - /// ## tolerance - /// sufficient absolute precision of parameter updates - /// - /// ## learningRate - /// initial learning rate for stochastic gradient descent - /// - /// ## miniBatchSize - /// maximum size of SGD mini batches - /// - /// ## maxEpochs - /// limit to the number of training epochs - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// (Array of optimal parameters, optimal validation *bias*) - /// - operation TrainQcccSequential(nQubits: Int, gates: Int[][], parameterSource: Double[][], trainingSet: Double[][], trainingLabels: Int[], trainingSchedule: Int[][], validationSchedule: Int[][], - learningRate: Double, tolerance: Double, miniBatchSize: Int, maxEpochs: Int, nMeasurements: Int) : (Double[],Double) { - let samples = unFlattenLabeledSamples(trainingSet,trainingLabels); - let sch = unFlattenSchedule(trainingSchedule); - let schValidate = unFlattenSchedule(validationSchedule); - let gateSequence = unFlattenGateSequence(gates); - - return TrainSequentialClassifier( - nQubits, gateSequence, parameterSource, samples, - sch, schValidate, learningRate, tolerance, miniBatchSize, - maxEpochs, nMeasurements - ); - } //TrainQcccSequential - - /// # Summary - /// attempts a single parameter update in the direction of mini batch gradient - /// - /// # Input - /// ## miniBatch - /// container of labeled samples in the mini batch - /// - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## lrate - /// the learning rate - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. 
- /// - /// # Output - /// (utility, (new)parameters) pair - /// - operation OneStochasticTrainingStep( - tolerance: Double, miniBatch: LabeledSample[], param: Double[], gates: GateSequence, - lrate: Double, measCount: Int - ) : (Double, Double[]) { - mutable upParam = new Double[Length(param)]; - mutable batchGradient = ConstantArray(Length(param), 0.0); - - for (samp in miniBatch) { - mutable err = IntAsDouble(samp::Label); - if (err < 1.0) { - set err = -1.0; //class 0 misclassified to class 1; strive to reduce the probability - } - let grad = EstimateGradientFromClassicalSample(tolerance, param, gates, samp::Features, measCount); - for (ip in 0..(Length(param) - 1)) { - // GradientClassicalSample actually computes antigradient, but err*grad corrects it back to gradient - set batchGradient w/= ip <- (batchGradient[ip] + lrate * err * grad[ip]); - } - - } - for (ip in 0..(Length(param)-1)) { - set upParam w/= ip <- (param[ip] + batchGradient[ip]); - } - return (SquaredNorm(batchGradient), upParam); //TODO:REVIEW: Ok to interpret utility as size of the overall move? - } - - - /// # Summary - /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator - /// - /// # Input - /// ## samples - /// a container of available data samples - /// - /// ## sched - /// a schedule of the data subset for this training loop - /// - /// ## schedScore - /// defines a (possibly different) data subset on which accuracy scoring is performed - /// - /// ## periodScore - /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) - /// - /// ## miniBatchSize - /// number of samples in a gradient mini batch - /// - /// ## param - /// initial parameter vector - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// reserved for future use; originally - initial prediction bias - /// - /// ## lrate - /// learning rate - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. 
- /// - operation OneStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int, - h0: Int, m0: Int): ((Int,Int),(Double,Double[])) - { - let HARDCODEDmaxIter = 10; - let HARDCODEDunderage = 3; //4/26 slack greater than 3 is not recommended - - - mutable hBest = h0; - mutable mBest = m0; - mutable biasBest = bias; - - let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - let (h2,m2) = TallyHitsMisses(pls,biasBest); - let missLocations = MissLocations(schedScore, pls, biasBest); - - mutable paramBest = param; - mutable paramCurrent = paramBest; - mutable biasCurrent = biasBest; - - //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters - for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { - let miniBatch = ExtractMiniBatch(miniBatchSize, ixLoc, missLocations, samples); - let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); - if (Microsoft.Quantum.Math.AbsD(utility) > 0.0000001) { - //There had been some parameter update - if (utility > 0.0) { //good parameter update - set paramCurrent = upParam; - let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); - set biasCurrent = adjustBias(plsCurrent, bias, tolerance, HARDCODEDmaxIter); - let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); - if (m1 < mBest + HARDCODEDunderage) { - //we allow limited non-greediness - if (m1 < mBest) { - set hBest = h1; - set mBest = m1; - set paramBest = paramCurrent; - set biasBest = biasCurrent; - } - } else { - //otherwise we scrap the parameter update - set paramCurrent = paramBest; - set biasCurrent = biasBest; - } - } - - } - - } - return ((hBest, mBest), (biasBest, paramBest)); - } - - //Make some oblivious gradien descent steps without checking the prediction quality - operation OneUncontrolledStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int): ((Int,Int),(Double,Double[])) - { - let HARDCODEDmaxIter = 10; //TODO:MUST: tolerance and maxIter cannot stay hardcoded - let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - mutable biasBest = adjustBias(pls, bias, tolerance, HARDCODEDmaxIter); - let (h0,m0) = TallyHitsMisses(pls,biasBest); // ClassificationScoreSimulated(samples, schedScore, param, gates, bias); //Deprecated - mutable hCur = h0; - mutable mCur = m0; - let missLocations = MissLocations(schedScore, pls, biasBest); - - mutable paramBest = param; - mutable paramCurrent = paramBest; - mutable biasCurrent = biasBest; - - //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters - for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { - let miniBatch = ExtractMiniBatch(miniBatchSize,ixLoc,missLocations,samples); - let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); - if (AbsD(utility) > 0.0000001) { - //There had been some parameter update - if (utility > 0.0) { //good parameter update - set paramCurrent = upParam; - let plsCurrent = 
ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); - set biasCurrent = adjustBias(plsCurrent, bias, tolerance, HARDCODEDmaxIter); - let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); - set hCur = h1; - set mCur = m1; - } - - } - - } - return ((hCur, mCur),(biasCurrent,paramCurrent)); - } //OneUncontrolledStochasticTrainingEpoch - - /// # Summary - /// Run a full circuit training loop on a subset of data samples - /// - /// # Input - /// ## samples - /// a container of available data samples - /// - /// ## sched - /// a schedule of the data subset for this training loop - /// - /// ## schedScore - /// defines a (possibly different) data subset on which accuracy scoring is performed - /// - /// ## periodScore - /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) - /// - /// ## miniBatchSize - /// number of samples in a gradient mini batch - /// - /// ## param - /// initial parameter vector - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// reserved for future use; originally - initial prediction bias - /// - /// ## lrate - /// learning rate - /// - /// ## maxEpochs - /// maximum number of epochs in this loop - /// - /// ## tol - /// tolerance: acceptable misprediction rate in training - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. - /// IMPORTANT: measCount==0 implies simulator deployment - /// - /// # Output - /// ((no.hits,no.misses),(opt.bias,opt.parameters)) - /// - operation StochasticTrainingLoop(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSizeInital: Int, param: Double[], gates: GateSequence, bias: Double, lrateInitial: Double, maxEpochs: Int, tol: Double, measCount: Int): ((Int,Int),(Double,Double[])) - { - let HARDCODEDmaxIter = 10; - //const - let manyNoops = 4; - //const - let relFuzz = 0.01; - let HARDCODEDmaxNoops = 2*manyNoops; - mutable pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - mutable biasBest = adjustBias(pls, bias, tol, HARDCODEDmaxIter); - let (h0,m0) = TallyHitsMisses(pls,biasBest); - mutable hBest = h0; - mutable mBest = m0; - mutable paramBest = param; - mutable paramCurrent = param; - mutable biasCurrent = biasBest; - - //reintroducing learning rate heuristics - mutable lrate = lrateInitial; - mutable batchSize = miniBatchSizeInital; - mutable noopCount = 0; - mutable upBias = biasCurrent; - mutable upParam = paramCurrent; - for (ep in 1..maxEpochs) { - let ((h1,m1),(upB,upP)) = OneStochasticTrainingEpoch(samples, sched, schedScore, periodScore, - batchSize, paramCurrent, gates, biasCurrent, lrate, tol, measCount, hBest, mBest); - set upBias = upB; - set upParam = upP; - if (m1 < mBest) - { - set hBest = h1; - set mBest = m1; - set paramBest = upParam; - set biasBest = upBias; - if (IntAsDouble (mBest)/IntAsDouble (mBest+hBest)< tol) //Terminate based on tolerance - { - return ((hBest,mBest),(biasBest,paramBest)); - } - set noopCount = 0; //Reset the counter of consequtive noops - set lrate = lrateInitial; - set batchSize = miniBatchSizeInital; - } - if (NearlyEqualD(biasCurrent,upBias) and _AllNearlyEqualD(paramCurrent,upParam)) - { - set noopCount = noopCount+1; - if (noopCount > manyNoops) - { - if (noopCount > HARDCODEDmaxNoops) - { - return ((hBest,mBest),(biasBest,paramBest)); //Too many non-steps. 
Continuation makes no sense - } - else - { - set upBias = randomize(upBias, relFuzz); - set upParam = ForEach(randomize(_, relFuzz), upParam); - } - } - set batchSize = noopCount; //batchSize + 1; //Try to fuzz things up with smaller batch count - //and heat up a bit - set lrate = 1.25*lrate; - } - else - { - set noopCount = 0; //Reset the counter of consequtive noops - set lrate = lrateInitial; - set batchSize = miniBatchSizeInital; - } - set paramCurrent = upParam; - set biasCurrent = upBias; - } - - return ((hBest,mBest),(biasBest,paramBest)); - } + open Microsoft.Quantum.Optimization; + + function _MisclassificationRate(probabilities : Double[], labels : Int[], bias : Double) : Double { + let proposedLabels = InferredLabels(bias, probabilities); + return IntAsDouble(NMismatches(proposedLabels, labels)) / IntAsDouble(Length(probabilities)); + } + + /// # Summary + /// Returns a bias value that leads to near-minimum misclassification score. + function _UpdatedBias(labeledProbabilities: (Double, Int)[], bias: Double, tolerance: Double) : Double { + mutable min1 = 1.0; + mutable max0 = 0.0; + + // Find the range of classification probabilities for each class. + for ((probability, label) in labeledProbabilities) { + if (label == 1) { + if (min1 > probability) { + set min1 = probability; + } + } else { + if (max0 < probability) { + set max0 = probability; + } + } + } + + // Exit early if we can find a perfect classification. + if (max0 <= min1) { + return 0.5 * (1.0 - max0 - min1); + } + + // If we can't find a perfect classification, minimize to find + // the best feasible bias. + let optimum = LocalUnivariateMinimum( + _MisclassificationRate(Mapped(Fst, labeledProbabilities), Mapped(Snd, labeledProbabilities), _), + (0.5 - max0, 0.5 - min1), + tolerance + ); + return optimum::Coordinate; + } + + operation TrainSequentialClassifier( + nQubits: Int, + gates: GateSequence, + parameterSource: Double[][], + samples: LabeledSample[], + trainingSchedule: SamplingSchedule, + validationSchedule: SamplingSchedule, + learningRate: Double, + tolerance: Double, + miniBatchSize: Int, + maxEpochs: Int, + nMeasurements: Int + ) : (Double[], Double) { + mutable retParam = [-1E12]; + mutable retBias = -2.0; //Indicates non-informative start + mutable bestValidation = Length(samples) + 1; + + let features = Mapped(_Features, samples); + let labels = Mapped(_Label, samples); + + for (idxStart in 0..(Length(parameterSource) - 1)) { + Message($"Beginning training at start point #{idxStart}..."); + let ((h, m), (b, parpar)) = StochasticTrainingLoop( + samples, trainingSchedule, trainingSchedule, 1, miniBatchSize, + parameterSource[idxStart], gates, 0.0, learningRate, maxEpochs, + tolerance, nMeasurements + ); + let probsValidation = EstimateClassificationProbabilitiesClassicalData( + tolerance, features, validationSchedule, nQubits, + gates, parpar, nMeasurements + ); + // Find the best bias for the new classification parameters. 
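+            // _UpdatedBias (defined above) scans the validation probabilities for the
+            // largest class-0 probability (max0) and the smallest class-1 probability
+            // (min1). If the classes already separate (max0 <= min1) it returns the
+            // midpoint bias 0.5 * (1.0 - max0 - min1); otherwise it minimizes the
+            // misclassification rate over the interval (0.5 - max0, 0.5 - min1)
+            // with LocalUnivariateMinimum, to within the given tolerance.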
+ let localBias = _UpdatedBias( + Zip(probsValidation, Sampled(validationSchedule, labels)), + 0.0, + tolerance + ); + let localPL = InferredLabels(localBias, probsValidation); + let localMisses = NMismatches(localPL, Sampled(validationSchedule, labels)); + if (bestValidation > localMisses) { + set bestValidation = localMisses; + set retParam = parpar; + set retBias = localBias; + } + + } + return (retParam, retBias); + } + + /// # Summary + /// Using a flat description of a classification model, find a good local optimum + /// for the model parameters and a related calssification bias + /// + /// # Input + /// ## nQubits + /// the number of qubits used for data encoding + /// + /// ## gates + /// flat characterization of circuit structure. Each element is [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] + /// + /// ## parameterSource + /// an array of parameter arrays, to be used as SGD starting points + /// + /// ## trainingSet + /// the set of training samples + /// + /// ## trainingLabels + /// the set of training labels + /// + /// ## trainingSchedule + /// defines a subset of training data actually used in the training process + /// + /// ## validatioSchedule + /// defines a subset of training data used for validation and computation of the *bias* + /// + /// ## learningRate + /// initial learning rate for stochastic gradient descent + /// + /// ## tolerance + /// sufficient absolute precision of parameter updates + /// + /// ## learningRate + /// initial learning rate for stochastic gradient descent + /// + /// ## miniBatchSize + /// maximum size of SGD mini batches + /// + /// ## maxEpochs + /// limit to the number of training epochs + /// + /// ## nMeasurenets + /// number of the measurement cycles to be used for estimation of each probability + /// + /// # Output + /// (Array of optimal parameters, optimal validation *bias*) + /// + operation TrainQcccSequential(nQubits: Int, gates: Int[][], parameterSource: Double[][], trainingSet: Double[][], trainingLabels: Int[], trainingSchedule: Int[][], validationSchedule: Int[][], + learningRate: Double, tolerance: Double, miniBatchSize: Int, maxEpochs: Int, nMeasurements: Int) : (Double[],Double) { + let samples = unFlattenLabeledSamples(trainingSet,trainingLabels); + let sch = unFlattenSchedule(trainingSchedule); + let schValidate = unFlattenSchedule(validationSchedule); + let gateSequence = unFlattenGateSequence(gates); + + return TrainSequentialClassifier( + nQubits, gateSequence, parameterSource, samples, + sch, schValidate, learningRate, tolerance, miniBatchSize, + maxEpochs, nMeasurements + ); + } //TrainQcccSequential + + /// # Summary + /// attempts a single parameter update in the direction of mini batch gradient + /// + /// # Input + /// ## miniBatch + /// container of labeled samples in the mini batch + /// + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## lrate + /// the learning rate + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. 
+ /// + /// # Output + /// (utility, (new)parameters) pair + /// + operation OneStochasticTrainingStep( + tolerance: Double, miniBatch: LabeledSample[], param: Double[], gates: GateSequence, + lrate: Double, measCount: Int + ) : (Double, Double[]) { + mutable upParam = new Double[Length(param)]; + mutable batchGradient = ConstantArray(Length(param), 0.0); + + for (samp in miniBatch) { + mutable err = IntAsDouble(samp::Label); + if (err < 1.0) { + set err = -1.0; //class 0 misclassified to class 1; strive to reduce the probability + } + let grad = EstimateGradientFromClassicalSample(tolerance, param, gates, samp::Features, measCount); + for (ip in 0..(Length(param) - 1)) { + // GradientClassicalSample actually computes antigradient, but err*grad corrects it back to gradient + set batchGradient w/= ip <- (batchGradient[ip] + lrate * err * grad[ip]); + } + + } + for (ip in 0..(Length(param)-1)) { + set upParam w/= ip <- (param[ip] + batchGradient[ip]); + } + return (SquaredNorm(batchGradient), upParam); //TODO:REVIEW: Ok to interpret utility as size of the overall move? + } + + + /// # Summary + /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator + /// + /// # Input + /// ## samples + /// a container of available data samples + /// + /// ## sched + /// a schedule of the data subset for this training loop + /// + /// ## schedScore + /// defines a (possibly different) data subset on which accuracy scoring is performed + /// + /// ## periodScore + /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) + /// + /// ## miniBatchSize + /// number of samples in a gradient mini batch + /// + /// ## param + /// initial parameter vector + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// ## bias + /// reserved for future use; originally - initial prediction bias + /// + /// ## lrate + /// learning rate + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. 
+ /// + operation OneStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, + miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int, + h0: Int, m0: Int): ((Int,Int),(Double,Double[])) + { + let HARDCODEDunderage = 3; //4/26 slack greater than 3 is not recommended + + + mutable hBest = h0; + mutable mBest = m0; + mutable biasBest = bias; + + let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); + let (h2,m2) = TallyHitsMisses(pls,biasBest); + let missLocations = MissLocations(schedScore, pls, biasBest); + + mutable paramBest = param; + mutable paramCurrent = paramBest; + mutable biasCurrent = biasBest; + + //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters + for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { + let miniBatch = ExtractMiniBatch(miniBatchSize, ixLoc, missLocations, samples); + let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); + if (Microsoft.Quantum.Math.AbsD(utility) > 0.0000001) { + //There had been some parameter update + if (utility > 0.0) { //good parameter update + set paramCurrent = upParam; + let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); + set biasCurrent = _UpdatedBias(plsCurrent, bias, tolerance); + let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); + if (m1 < mBest + HARDCODEDunderage) { + //we allow limited non-greediness + if (m1 < mBest) { + set hBest = h1; + set mBest = m1; + set paramBest = paramCurrent; + set biasBest = biasCurrent; + } + } else { + //otherwise we scrap the parameter update + set paramCurrent = paramBest; + set biasCurrent = biasBest; + } + } + + } + + } + return ((hBest, mBest), (biasBest, paramBest)); + } + + //Make some oblivious gradien descent steps without checking the prediction quality + operation OneUncontrolledStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, + miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int): ((Int,Int),(Double,Double[])) + { + let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); + mutable biasBest = _UpdatedBias(pls, bias, tolerance); + let (h0,m0) = TallyHitsMisses(pls,biasBest); // ClassificationScoreSimulated(samples, schedScore, param, gates, bias); //Deprecated + mutable hCur = h0; + mutable mCur = m0; + let missLocations = MissLocations(schedScore, pls, biasBest); + + mutable paramBest = param; + mutable paramCurrent = paramBest; + mutable biasCurrent = biasBest; + + //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters + for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { + let miniBatch = ExtractMiniBatch(miniBatchSize,ixLoc,missLocations,samples); + let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); + if (AbsD(utility) > 0.0000001) { + //There had been some parameter update + if (utility > 0.0) { //good parameter update + set paramCurrent = upParam; + let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); + set biasCurrent = _UpdatedBias(plsCurrent, bias, tolerance); + let 
(h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); + set hCur = h1; + set mCur = m1; + } + + } + + } + return ((hCur, mCur),(biasCurrent,paramCurrent)); + } //OneUncontrolledStochasticTrainingEpoch + + /// # Summary + /// Run a full circuit training loop on a subset of data samples + /// + /// # Input + /// ## samples + /// a container of available data samples + /// + /// ## sched + /// a schedule of the data subset for this training loop + /// + /// ## schedScore + /// defines a (possibly different) data subset on which accuracy scoring is performed + /// + /// ## periodScore + /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) + /// + /// ## miniBatchSize + /// number of samples in a gradient mini batch + /// + /// ## param + /// initial parameter vector + /// + /// ## gates + /// sequence of gates in the circuit + /// + /// ## bias + /// reserved for future use; originally - initial prediction bias + /// + /// ## lrate + /// learning rate + /// + /// ## maxEpochs + /// maximum number of epochs in this loop + /// + /// ## tol + /// tolerance: acceptable misprediction rate in training + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. + /// IMPORTANT: measCount==0 implies simulator deployment + /// + /// # Output + /// ((no.hits,no.misses),(opt.bias,opt.parameters)) + /// + operation StochasticTrainingLoop(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, + miniBatchSizeInital: Int, param: Double[], gates: GateSequence, bias: Double, lrateInitial: Double, maxEpochs: Int, tol: Double, measCount: Int): ((Int,Int),(Double,Double[])) + { + //const + let manyNoops = 4; + //const + let relFuzz = 0.01; + let HARDCODEDmaxNoops = 2*manyNoops; + mutable pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); + mutable biasBest = _UpdatedBias(pls, bias, tol); + let (h0, m0) = TallyHitsMisses(pls,biasBest); + mutable hBest = h0; + mutable mBest = m0; + mutable paramBest = param; + mutable paramCurrent = param; + mutable biasCurrent = biasBest; + + //reintroducing learning rate heuristics + mutable lrate = lrateInitial; + mutable batchSize = miniBatchSizeInital; + mutable noopCount = 0; + mutable upBias = biasCurrent; + mutable upParam = paramCurrent; + for (ep in 1..maxEpochs) { + let ((h1,m1),(upB,upP)) = OneStochasticTrainingEpoch(samples, sched, schedScore, periodScore, + batchSize, paramCurrent, gates, biasCurrent, lrate, tol, measCount, hBest, mBest); + set upBias = upB; + set upParam = upP; + if (m1 < mBest) + { + set hBest = h1; + set mBest = m1; + set paramBest = upParam; + set biasBest = upBias; + if (IntAsDouble (mBest)/IntAsDouble (mBest+hBest)< tol) //Terminate based on tolerance + { + return ((hBest,mBest),(biasBest,paramBest)); + } + set noopCount = 0; //Reset the counter of consequtive noops + set lrate = lrateInitial; + set batchSize = miniBatchSizeInital; + } + if (NearlyEqualD(biasCurrent,upBias) and _AllNearlyEqualD(paramCurrent,upParam)) + { + set noopCount = noopCount+1; + if (noopCount > manyNoops) + { + if (noopCount > HARDCODEDmaxNoops) + { + return ((hBest,mBest),(biasBest,paramBest)); //Too many non-steps. 
Continuation makes no sense + } + else + { + set upBias = randomize(upBias, relFuzz); + set upParam = ForEach(randomize(_, relFuzz), upParam); + } + } + set batchSize = noopCount; //batchSize + 1; //Try to fuzz things up with smaller batch count + //and heat up a bit + set lrate = 1.25*lrate; + } + else + { + set noopCount = 0; //Reset the counter of consequtive noops + set lrate = lrateInitial; + set batchSize = miniBatchSizeInital; + } + set paramCurrent = upParam; + set biasCurrent = upBias; + } + + return ((hBest,mBest),(biasBest,paramBest)); + } } diff --git a/MachineLearning/src/Runtime/Types.qs b/MachineLearning/src/Runtime/Types.qs index 7b0d668294e..759acbc4094 100644 --- a/MachineLearning/src/Runtime/Types.qs +++ b/MachineLearning/src/Runtime/Types.qs @@ -2,71 +2,92 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arithmetic; - /// Qubit span of a multicontrolled single-qubit gate - newtype GateSpan = ( - TargetIndex: Int, - ControlIndices: Int[] - ); + /// Qubit span of a multicontrolled single-qubit gate + newtype GateSpan = ( + TargetIndex: Int, + ControlIndices: Int[] + ); - /// One-parameter controlled rotation gate triplet: - /// (control structure, rotation axis, index of the rotation parameter) - newtype ControlledRotation = ( + /// One-parameter controlled rotation gate triplet: + /// (control structure, rotation axis, index of the rotation parameter) + newtype ControlledRotation = ( Span: GateSpan, Axis: Pauli, Index: Int ); - /// Abstraction for sequence of gates - newtype GateSequence = ControlledRotation[]; + /// Abstraction for sequence of gates + newtype GateSequence = ControlledRotation[]; - /// Abstraction for state preparation - /// Fst(StateGenerator) is the number of qubits - /// Snd(Stategenerator) is a circuit to prepare subject state - newtype StateGenerator = ( + /// Abstraction for state preparation + /// Fst(StateGenerator) is the number of qubits + /// Snd(Stategenerator) is a circuit to prepare subject state + newtype StateGenerator = ( NQubits: Int, Apply: (LittleEndian => Unit is Adj + Ctl) ); - /// Convention: negative Snd(labledSample) signifies the last sample in a batch - newtype LabeledSample = ( + /// Convention: negative Snd(labledSample) signifies the last sample in a batch + newtype LabeledSample = ( Features: Double[], Label: Int ); - // Here, we define a couple private accessor functions for LabeledSample, - // in lieu of having lambda support. These should not be used in external - // code. - function _Features(sample : LabeledSample) : Double[] { return sample::Features; } - function _Label(sample : LabeledSample) : Int { return sample::Label; } + // Here, we define a couple private accessor functions for LabeledSample, + // in lieu of having lambda support. These should not be used in external + // code. + function _Features(sample : LabeledSample) : Double[] { return sample::Features; } + function _Label(sample : LabeledSample) : Int { return sample::Label; } + + /// Abstraction for a two-level range of indices + newtype SamplingSchedule = Range[]; - /// Abstraction for a two-level range of indices - newtype SamplingSchedule = Range[]; + /// # Summary + /// Returns the number of elements in a given sampling schedule. + /// + /// # Input + /// ## schedule + /// A sampling schedule whose length is to be returned. 
+ /// + /// # Output + /// The number of elements in the given sampling schedule. + function ScheduleLength(schedule : SamplingSchedule) : Int { + mutable length = 0; + for (range in schedule!) { + for (index in range) { + set length += 1; + } + } + return length; + } - /// # Summary - /// Returns the number of elements in a given sampling schedule. - /// - /// # Input - /// ## schedule - /// A sampling schedule whose length is to be returned. - /// - /// # Output - /// The number of elements in the given sampling schedule. - function ScheduleLength(schedule : SamplingSchedule) : Int { - mutable length = 0; - for (range in schedule!) { - for (index in range) { - set length += 1; - } - } - return length; - } + /// # Summary + /// Samples a given array, using the given schedule. + /// + /// # Input + /// ## schedule + /// A schedule to use in sampling values. + /// ## values + /// An array of values to be sampled. + /// + /// # Output + /// An array of elements from values, following the given schedule. + function Sampled<'T>(schedule : SamplingSchedule, values : 'T[]) : 'T[] { + mutable sampled = new 'T[0]; + for (range in schedule!) { + for (index in range) { + set sampled += [values[index]]; + } + } + return sampled; + } - newtype ValidationResults = ( - NMisclassifications: Int - ); + newtype ValidationResults = ( + NMisclassifications: Int + ); diff --git a/MachineLearning/src/Runtime/Validation.qs b/MachineLearning/src/Runtime/Validation.qs index bd0f50dff7d..fca2e87397a 100644 --- a/MachineLearning/src/Runtime/Validation.qs +++ b/MachineLearning/src/Runtime/Validation.qs @@ -3,74 +3,96 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - function NMismatches(proposed: Int[], actual: Int[], validationSchedule: SamplingSchedule): Int { - mutable count = 0; - mutable ir = 0; - for (rg in validationSchedule!) { - for (ix in rg) { - if (proposed[ir] != actual[ix]) { - set count += 1; - } - set ir += 1; - } - } - return count; - } + function NMismatches(proposed: Int[], actual: Int[]): Int { + mutable count = 0; + for ((proposedLabel, actualLabel) in Zip(proposed, actual)) { + if (proposedLabel != actualLabel) { + set count += 1; + } + } + return count; + } - /// # Summary - /// Using a flat description of a trained classification model, count - /// the number of mispredictions occuring over the validation set - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## trainingSet - /// the set of training samples - /// - /// ## trainingLabels - /// the set of training labels - /// - /// ## validatioSchedule - /// defines a subset of training data used for validation and computation of the *bias* - /// - /// ## gates - /// Flat representation of classifier structure. 
Each element is - /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] - /// - /// ## parameters - /// an array of candidate parameters - /// - /// ## bias - /// candidate predition bias - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// the number of misclassifications - /// - operation CountValidationMisses(tolerance: Double, nQubits: Int, trainingSet: Double[][], trainingLabels: Int[], validationSchedule: Int[][], gates: Int[][], parameters: Double[],bias:Double, nMeasurements: Int) : Int - { - let schValidate = unFlattenSchedule(validationSchedule); - let results = ValidateModel( - tolerance, nQubits, Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), - schValidate, unFlattenGateSequence(gates), - parameters, bias, nMeasurements - ); - return results::NMisclassifications; - } + /// # Summary + /// tallies hits and misses off a list of probability estimates + /// + /// # Input + /// ## pls + /// a list of estimated probabilities with the corresponding class labels + /// + /// ## bias + /// bias on record + /// + /// # Output + /// (no.hits, no.misses) pair + /// + function TallyHitsMisses(pls : (Double, Int)[], bias : Double) : (Int, Int) { + mutable hits = 0; + mutable misses = 0; + for ((classificationProbability, label) in pls) { + if (label == InferredLabel(bias, classificationProbability)) { + set hits += 1; + } else { + set misses += 1; + } + } + return (hits, misses); + } - operation ValidateModel(tolerance: Double, nQubits: Int, samples : LabeledSample[], validationSchedule: SamplingSchedule, gates: GateSequence, parameters: Double[], bias:Double, nMeasurements: Int) : ValidationResults - { - let features = Mapped(_Features, samples); - let labels = Mapped(_Label, samples); - let probsValidation = EstimateClassificationProbabilitiesClassicalData(tolerance, features, validationSchedule, nQubits, gates, parameters, nMeasurements); - let localPL = InferredLabels(bias, probsValidation); - let nMismatches = NMismatches(localPL, labels, validationSchedule); - return ValidationResults( - nMismatches - ); - } + /// # Summary + /// Using a flat description of a trained classification model, count + /// the number of mispredictions occuring over the validation set + /// + /// # Input + /// ## nQubits + /// the number of qubits used for data encoding + /// + /// ## trainingSet + /// the set of training samples + /// + /// ## trainingLabels + /// the set of training labels + /// + /// ## validatioSchedule + /// defines a subset of training data used for validation and computation of the *bias* + /// + /// ## gates + /// Flat representation of classifier structure. 
Each element is + /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] + /// + /// ## parameters + /// an array of candidate parameters + /// + /// ## bias + /// candidate predition bias + /// + /// ## nMeasurenets + /// number of the measurement cycles to be used for estimation of each probability + /// + /// # Output + /// the number of misclassifications + /// + operation CountValidationMisses(tolerance: Double, nQubits: Int, trainingSet: Double[][], trainingLabels: Int[], validationSchedule: Int[][], gates: Int[][], parameters: Double[],bias:Double, nMeasurements: Int) : Int + { + let schValidate = unFlattenSchedule(validationSchedule); + let results = ValidateModel( + tolerance, nQubits, Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), + schValidate, unFlattenGateSequence(gates), + parameters, bias, nMeasurements + ); + return results::NMisclassifications; + } + + operation ValidateModel(tolerance: Double, nQubits: Int, samples : LabeledSample[], validationSchedule: SamplingSchedule, gates: GateSequence, parameters: Double[], bias:Double, nMeasurements: Int) : ValidationResults + { + let features = Mapped(_Features, samples); + let labels = Sampled(validationSchedule, Mapped(_Label, samples)); + let probsValidation = EstimateClassificationProbabilitiesClassicalData(tolerance, features, validationSchedule, nQubits, gates, parameters, nMeasurements); + let localPL = InferredLabels(bias, probsValidation); + let nMismatches = NMismatches(localPL, labels); + return ValidationResults( + nMismatches + ); + } } diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj index d30ed6814b8..40057467b84 100644 --- a/MachineLearning/tests/MachineLearningTests.csproj +++ b/MachineLearning/tests/MachineLearningTests.csproj @@ -11,8 +11,8 @@ - - + + diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj index fe72ffe01c3..2ae8d804e74 100644 --- a/Numerics/src/Numerics.csproj +++ b/Numerics/src/Numerics.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 Microsoft.Quantum.Numerics @@ -30,7 +30,7 @@ - + diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj index 3f1f62f3317..c729aaa91ba 100644 --- a/Numerics/tests/NumericsTests.csproj +++ b/Numerics/tests/NumericsTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.0 x64 @@ -15,8 +15,8 @@ - - + + diff --git a/Standard/src/Optimization/Properties/NamespaceInfo.qs b/Standard/src/Optimization/Properties/NamespaceInfo.qs new file mode 100644 index 00000000000..4f2b2e4058e --- /dev/null +++ b/Standard/src/Optimization/Properties/NamespaceInfo.qs @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/// # Summary +/// Contains functions and optimizations for finding minima. +namespace Microsoft.Quantum.Optimization { } diff --git a/Standard/src/Optimization/Univariate.qs b/Standard/src/Optimization/Univariate.qs new file mode 100644 index 00000000000..92f238ec056 --- /dev/null +++ b/Standard/src/Optimization/Univariate.qs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Optimization { + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Math; + + /// # Summary + /// Represents the result of optimizing a univariate function. + /// + /// # Input + /// ## Coordinate + /// Input at which an optimium was found. 
+ /// ## Value + /// Value returned by the function at its optimum. + newtype UnivariateOptimizationResult = ( + Coordinate : Double, + Value : Double + ); + + /// # Summary + /// Returns the width of an interval. + function _Width(left : Double, right : Double) : Double { + return right - left; + } + + /// # Summary + /// Given an interval, returns a probe interval that contracts the given + /// interval by a factor of the golden ratio. + function _Probe(left : Double, right : Double) : (Double, Double) { + let goldenRatio = (Sqrt(5.0) + 1.0) / 2.0; + let delta = (_Width(left, right)) / goldenRatio; + return ( + right - delta, left + delta + ); + } + + /// # Summary + /// Returns the midpoint for an interval. + function _Midpoint(left : Double, right : Double) : (Double) { + return (left + right) / 2.0; + } + + /// # Summary + /// Returns the local minimum for a univariate function over a bounded interval, + /// using a golden interval search. + /// + /// # Input + /// ## fn + /// The univariate function to be minimized. + /// ## bounds + /// The interval in which the local minimum is to be found. + /// ## tolerance + /// The accuracy in the function input to be tolerated. + /// The search for local optima will continue until the interval is + /// smaller in width than this tolerance. + /// + /// # Output + /// A local optima of the given function, located within the given bounds. + function LocalUnivariateMinimum( + fn : (Double -> Double), + bounds : (Double, Double), + tolerance : Double + ) : UnivariateOptimizationResult { + + mutable interval = bounds; + mutable probe = _Probe(interval); + while (_Width(probe) > tolerance) { + set interval = + fn(Fst(probe)) < fn(Snd(probe)) + ? (Fst(interval), Snd(probe)) + | (Fst(probe), Snd(interval)); + set probe = _Probe(interval); + } + + let mid = _Midpoint(interval); + return UnivariateOptimizationResult( + mid, fn(mid) + ); + + } + +} diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj index f32f4c1fdf1..d1e16215f06 100644 --- a/Standard/src/Standard.csproj +++ b/Standard/src/Standard.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 Microsoft.Quantum.Standard @@ -30,7 +30,7 @@ - + diff --git a/Standard/tests/Optimization/UnivariateTests.qs b/Standard/tests/Optimization/UnivariateTests.qs new file mode 100644 index 00000000000..73301bcb343 --- /dev/null +++ b/Standard/tests/Optimization/UnivariateTests.qs @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Optimization; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Diagnostics; + + function ParabolaCase(minima : Double, x : Double) : Double { + return PowD((x - minima), 2.0); + } + + // @Test("QuantumSimulator") + function MinimizedParabolaTest() : Unit { + let optimum = LocalUnivariateMinimum(ParabolaCase(3.14, _), (-7.0, +12.0), 1e-10); + NearEqualityFactD(optimum::Coordinate, 3.14); + NearEqualityFactD(optimum::Value, 0.0); + } + +} diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj index 771ff3c378a..59a589b773b 100644 --- a/Standard/tests/Standard.Tests.csproj +++ b/Standard/tests/Standard.Tests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.0 x64 @@ -20,8 +20,8 @@ - - + + From 0e3b5f9eeef602c8f0c5f6baf5403f0a04e489db Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Fri, 6 Dec 2019 17:43:23 -0800 Subject: [PATCH 07/43] Add placeholder for QML unit tests. (#185) * Add placeholder for QML unit tests. * Add copyright header. 
* Trigger build on feature work. --- Chemistry/src/DataModel/DataModel.csproj | 2 +- Chemistry/src/Jupyter/Jupyter.csproj | 2 +- Chemistry/src/Runtime/Runtime.csproj | 2 +- .../tests/ChemistryTests/QSharpTests.csproj | 4 +-- .../tests/DataModelTests/CSharpTests.csproj | 6 ++-- .../tests/SamplesTests/SamplesTests.csproj | 6 ++-- .../tests/SystemTests/SystemTests.csproj | 4 +-- .../src/DataModel/DataModel.csproj | 2 +- MachineLearning/src/Runtime/Runtime.csproj | 2 +- MachineLearning/tests/InputEncodingTests.qs | 28 +++++++++++++++++++ .../tests/MachineLearningTests.csproj | 4 +-- Numerics/src/Numerics.csproj | 2 +- Numerics/tests/NumericsTests.csproj | 4 +-- Standard/src/Standard.csproj | 2 +- Standard/tests/Standard.Tests.csproj | 4 +-- build.yml | 1 + 16 files changed, 52 insertions(+), 23 deletions(-) create mode 100644 MachineLearning/tests/InputEncodingTests.qs diff --git a/Chemistry/src/DataModel/DataModel.csproj b/Chemistry/src/DataModel/DataModel.csproj index 4e1f891c5b3..a0f7a2db3ab 100644 --- a/Chemistry/src/DataModel/DataModel.csproj +++ b/Chemistry/src/DataModel/DataModel.csproj @@ -35,7 +35,7 @@ - + diff --git a/Chemistry/src/Jupyter/Jupyter.csproj b/Chemistry/src/Jupyter/Jupyter.csproj index 05d9aa2329a..5993652baef 100644 --- a/Chemistry/src/Jupyter/Jupyter.csproj +++ b/Chemistry/src/Jupyter/Jupyter.csproj @@ -26,7 +26,7 @@ - + diff --git a/Chemistry/src/Runtime/Runtime.csproj b/Chemistry/src/Runtime/Runtime.csproj index e95a5c76452..4eee1e92205 100644 --- a/Chemistry/src/Runtime/Runtime.csproj +++ b/Chemistry/src/Runtime/Runtime.csproj @@ -15,7 +15,7 @@ - + diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj index 96c76d44343..e3ccd565594 100644 --- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj +++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj @@ -11,8 +11,8 @@ - - + + diff --git a/Chemistry/tests/DataModelTests/CSharpTests.csproj b/Chemistry/tests/DataModelTests/CSharpTests.csproj index 754a219c1fc..c3243e55cba 100644 --- a/Chemistry/tests/DataModelTests/CSharpTests.csproj +++ b/Chemistry/tests/DataModelTests/CSharpTests.csproj @@ -24,9 +24,9 @@ - - - + + + diff --git a/Chemistry/tests/SamplesTests/SamplesTests.csproj b/Chemistry/tests/SamplesTests/SamplesTests.csproj index 206c9c3b679..ecf44796b07 100644 --- a/Chemistry/tests/SamplesTests/SamplesTests.csproj +++ b/Chemistry/tests/SamplesTests/SamplesTests.csproj @@ -18,9 +18,9 @@ - - - + + + diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj index 8bcec6ef3cd..638b7bfa7cd 100644 --- a/Chemistry/tests/SystemTests/SystemTests.csproj +++ b/Chemistry/tests/SystemTests/SystemTests.csproj @@ -18,8 +18,8 @@ - - + + diff --git a/MachineLearning/src/DataModel/DataModel.csproj b/MachineLearning/src/DataModel/DataModel.csproj index fc1d86492a9..24297486e72 100644 --- a/MachineLearning/src/DataModel/DataModel.csproj +++ b/MachineLearning/src/DataModel/DataModel.csproj @@ -32,7 +32,7 @@ - + diff --git a/MachineLearning/src/Runtime/Runtime.csproj b/MachineLearning/src/Runtime/Runtime.csproj index 5e1fb341f6c..c3da71b95ce 100644 --- a/MachineLearning/src/Runtime/Runtime.csproj +++ b/MachineLearning/src/Runtime/Runtime.csproj @@ -10,7 +10,7 @@ - + diff --git a/MachineLearning/tests/InputEncodingTests.qs b/MachineLearning/tests/InputEncodingTests.qs new file mode 100644 index 00000000000..692901bb343 --- /dev/null +++ b/MachineLearning/tests/InputEncodingTests.qs @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning.Tests { + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.MachineLearning; + + operation _ApplyToBareRegister(op : (LittleEndian => Unit is Adj), register : Qubit[]) : Unit is Adj { + op(LittleEndian(register)); + } + + @Test("QuantumSimulator") + operation CheckInputEncoderWithPositiveInputs() : Unit { + let coefficients = [0.1, 0.2, 0.3, 0.4]; + let encoder = InputEncoder(coefficients); + AssertOperationsEqualReferenced(2, + _ApplyToBareRegister(PrepareArbitraryState(Mapped(ComplexPolar(_, 0.0), coefficients), _), _), + _ApplyToBareRegister(encoder, _) + ); + } + +} diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj index 40057467b84..e8620296bed 100644 --- a/MachineLearning/tests/MachineLearningTests.csproj +++ b/MachineLearning/tests/MachineLearningTests.csproj @@ -11,8 +11,8 @@ - - + + diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj index 2ae8d804e74..61d5bf1c417 100644 --- a/Numerics/src/Numerics.csproj +++ b/Numerics/src/Numerics.csproj @@ -30,7 +30,7 @@ - + diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj index c729aaa91ba..4a7b29edfa4 100644 --- a/Numerics/tests/NumericsTests.csproj +++ b/Numerics/tests/NumericsTests.csproj @@ -15,8 +15,8 @@ - - + + diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj index d1e16215f06..ad5987d5a62 100644 --- a/Standard/src/Standard.csproj +++ b/Standard/src/Standard.csproj @@ -30,7 +30,7 @@ - + diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj index 59a589b773b..c05bed8b8ca 100644 --- a/Standard/tests/Standard.Tests.csproj +++ b/Standard/tests/Standard.Tests.csproj @@ -20,8 +20,8 @@ - - + + diff --git a/build.yml b/build.yml index f6146c89972..2be88668e74 100644 --- a/build.yml +++ b/build.yml @@ -4,6 +4,7 @@ name: $(Build.Major).$(Build.Minor).$(date:yyMM).$(DayOfMonth)$(rev:rr) trigger: - master +- feature/* variables: Build.Major: 0 From c10eea421667873bba110d3a07d2c18e76e7af41 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Mon, 9 Dec 2019 15:39:55 -0800 Subject: [PATCH 08/43] Consolidate exact and approximate multiplexing logic. (#184) * Consolidated exact and approximate MultiplexZ. * Consolidate exact and approx ApplyDiagonalUnitary * Finished consolidating multiplexers. * Code cleanup, inclusive language. * Whitespace fix and name clarification. * Consolidated arbitrary state preparation. * Slight cleanup. * API documentation comments. 
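
As a usage illustration of the consolidated state preparation (a minimal sketch, not part of this patch): the snippet below assumes only what the diff that follows shows, namely that `ApproximatelyPrepareArbitraryState(tolerance, coefficients, register)` is available via `Microsoft.Quantum.Preparation` and that a tolerance of `1E-12` requests an (almost) exact preparation, as `InputEncoder` does. The namespace and operation name in the sketch are hypothetical.

```qsharp
namespace Microsoft.Quantum.MachineLearning.Sketches {
    open Microsoft.Quantum.Arithmetic;
    open Microsoft.Quantum.Math;
    open Microsoft.Quantum.Preparation;

    /// Prepares the equal superposition (|0> + |1>) / Sqrt(2) on a one-qubit
    /// register, using the consolidated approximate preparation operation.
    operation PrepareUniformPair(register : LittleEndian) : Unit is Adj + Ctl {
        let amplitudes = [ComplexPolar(Sqrt(0.5), 0.0), ComplexPolar(Sqrt(0.5), 0.0)];
        // Tolerance-first argument order, mirroring the InputEncoder call below.
        ApproximatelyPrepareArbitraryState(1E-12, amplitudes, register);
    }
}
```

Applied to a `LittleEndian` register over a single fresh qubit, this produces the same state as `H`; the point is only to show the argument order and namespace used throughout the consolidated code.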
--- MachineLearning/src/Runtime/InputEncoding.qs | 166 +++--- .../src/Runtime/SpecialMultiplexor.qs | 252 --------- MachineLearning/src/Runtime/SpecialSP.qs | 242 --------- Standard/src/Canon/Utils/Multiplexer.qs | 500 +++++++++++------- .../{StatePreparation.qs => Arbitrary.qs} | 72 +-- 5 files changed, 441 insertions(+), 791 deletions(-) delete mode 100644 MachineLearning/src/Runtime/SpecialMultiplexor.qs delete mode 100644 MachineLearning/src/Runtime/SpecialSP.qs rename Standard/src/Preparation/{StatePreparation.qs => Arbitrary.qs} (82%) diff --git a/MachineLearning/src/Runtime/InputEncoding.qs b/MachineLearning/src/Runtime/InputEncoding.qs index cf243f2a3c2..201b1d5e3be 100644 --- a/MachineLearning/src/Runtime/InputEncoding.qs +++ b/MachineLearning/src/Runtime/InputEncoding.qs @@ -2,113 +2,113 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Preparation; open Microsoft.Quantum.Convert; open Microsoft.Quantum.Math; open Microsoft.Quantum.Arithmetic; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - function _CanApplyTwoQubitCase(datum: Double[]) : Bool { - return((Length(datum)==4) and (Microsoft.Quantum.Math.AbsD(datum[0]*datum[3]-datum[1]*datum[2])< 1E-12) and (Microsoft.Quantum.Math.AbsD(datum[0])> 1E-4)); - } + function _CanApplyTwoQubitCase(datum: Double[]) : Bool { + return((Length(datum)==4) and (Microsoft.Quantum.Math.AbsD(datum[0]*datum[3]-datum[1]*datum[2])< 1E-12) and (Microsoft.Quantum.Math.AbsD(datum[0])> 1E-4)); + } - operation _ApplyTwoQubitCase(datum: Double[], reg: LittleEndian) : Unit is Adj + Ctl { - let x = datum[1]/datum[0]; - let y = datum[2]/datum[0]; - // we now encoding [1,x,y,x*y] - let ax = 2.0 * ArcTan(x); - let ay = 2.0 * ArcTan(y); - R(PauliY, ay, (reg!)[1]); - R(PauliY, ax, (reg!)[0]); - } + operation _ApplyTwoQubitCase(datum: Double[], reg: LittleEndian) : Unit is Adj + Ctl { + let x = datum[1]/datum[0]; + let y = datum[2]/datum[0]; + // we now encoding [1,x,y,x*y] + let ax = 2.0 * ArcTan(x); + let ay = 2.0 * ArcTan(y); + R(PauliY, ay, (reg!)[1]); + R(PauliY, ax, (reg!)[0]); + } - function _Unnegate(negLocs: Int[], coefficients : ComplexPolar[]) : ComplexPolar[] { - mutable ret = coefficients; - for (idxNegative in negLocs) { - if (idxNegative >= Length(coefficients)) { - fail $"Cannot set the phase at index {idxNegative}, only {Length(coefficients)} coefficients were provided."; - } - let coefficient = coefficients[idxNegative]; - set ret w/= idxNegative <- ComplexPolar(coefficient::Magnitude, 0.0); - } - return ret; - } + function _Unnegate(negLocs: Int[], coefficients : ComplexPolar[]) : ComplexPolar[] { + mutable ret = coefficients; + for (idxNegative in negLocs) { + if (idxNegative >= Length(coefficients)) { + fail $"Cannot set the phase at index {idxNegative}, only {Length(coefficients)} coefficients were provided."; + } + let coefficient = coefficients[idxNegative]; + set ret w/= idxNegative <- ComplexPolar(coefficient::Magnitude, 0.0); + } + return ret; + } - /// Do special processing on the first cNegative entries - operation _EncodeSparseNegativeInput(cNegative: Int, tolerance: Double,coefficients : ComplexPolar[], reg: LittleEndian): Unit is Adj + Ctl - { - let negLocs = collectNegativeLocs(cNegative, coefficients); - // Prepare the state disregarding the sign of negative components. - NoisyPrepareArbitraryState(tolerance, _Unnegate(negLocs, coefficients), reg); - // Reflect about the negative coefficients to apply the negative signs - // at the end. 
- for (ineg in 0..(cNegative - 1)) { - let jx = negLocs[ineg]; - if (jx > -1) { - ReflectAboutInteger(jx, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) - } - } - } + /// Do special processing on the first cNegative entries + operation _EncodeSparseNegativeInput(cNegative: Int, tolerance: Double,coefficients : ComplexPolar[], reg: LittleEndian): Unit is Adj + Ctl + { + let negLocs = collectNegativeLocs(cNegative, coefficients); + // Prepare the state disregarding the sign of negative components. + ApproximatelyPrepareArbitraryState(tolerance, _Unnegate(negLocs, coefficients), reg); + // Reflect about the negative coefficients to apply the negative signs + // at the end. + for (ineg in 0..(cNegative - 1)) { + let jx = negLocs[ineg]; + if (jx > -1) { + ReflectAboutInteger(jx, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) + } + } + } - function NoisyInputEncoder(tolerance: Double,coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { - //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 - let nCoefficients = Length(coefficients); + function NoisyInputEncoder(tolerance: Double,coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { + //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 + let nCoefficients = Length(coefficients); mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients]; mutable cNegative = 0; for (idx in 0 .. nCoefficients - 1) { - mutable coef = coefficients[idx]; - if (tolerance > 1E-9) { - set coef = tolerance * IntAsDouble(Round(coefficients[idx] / tolerance)); //quantization - } - mutable ang = 0.0; - if (coef < 0.0) { - set cNegative += 1; - set coef = -coef; - set ang = PI(); - } + mutable coef = coefficients[idx]; + if (tolerance > 1E-9) { + set coef = tolerance * IntAsDouble(Round(coefficients[idx] / tolerance)); //quantization + } + mutable ang = 0.0; + if (coef < 0.0) { + set cNegative += 1; + set coef = -coef; + set ang = PI(); + } set coefficientsComplexPolar w/= idx <- ComplexPolar(coef, ang); } - // Check if we can apply the explicit two-qubit case. + // Check if we can apply the explicit two-qubit case. if (_CanApplyTwoQubitCase(coefficients)) { - return _ApplyTwoQubitCase(coefficients, _); - } - // If not, we may be able to use a special protocol in the case that - // there are only a few negative coefficients. - // Here, by a "few," we mean fewer than the number of qubits required - // to encode features. - if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) { - return _EncodeSparseNegativeInput(cNegative, tolerance, coefficientsComplexPolar, _); //TODO:MORE:ACCEPTANCE ("Wines" passing soi far) - } - - // Finally, we fall back to arbitrary state preparation. - return NoisyPrepareArbitraryState(tolerance, coefficientsComplexPolar, _); - } //EncodeNoisyInput + return _ApplyTwoQubitCase(coefficients, _); + } + // If not, we may be able to use a special protocol in the case that + // there are only a few negative coefficients. + // Here, by a "few," we mean fewer than the number of qubits required + // to encode features. 
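+        // Illustrative note (not from the original source): with, say,
+        // Length(coefficients) == 8 we have Lg(8.0) == 3.0, so the test below
+        // reads IntAsDouble(cNegative) < 4.0 and admits at most three negative
+        // coefficients for the sparse sign-flip encoding.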
+ if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) { + return _EncodeSparseNegativeInput(cNegative, tolerance, coefficientsComplexPolar, _); //TODO:MORE:ACCEPTANCE ("Wines" passing soi far) + } + + // Finally, we fall back to arbitrary state preparation. + return ApproximatelyPrepareArbitraryState(tolerance, coefficientsComplexPolar, _); + } //EncodeNoisyInput - //TODO:REVIEW: Design consideration! The implicit qubit count must be read off from the state encoder, NOT from the gate sequence! + //TODO:REVIEW: Design consideration! The implicit qubit count must be read off from the state encoder, NOT from the gate sequence! - /// Create amplitude encoding of an array of real-valued coefficients - /// The vector of 'coefficients' does not have to be unitary - function InputEncoder(coefficients : Double[]): (LittleEndian => Unit is Adj + Ctl) { - //default implementation, does not respect sparcity - let nCoefficients = Length(coefficients); + /// Create amplitude encoding of an array of real-valued coefficients + /// The vector of 'coefficients' does not have to be unitary + function InputEncoder(coefficients : Double[]): (LittleEndian => Unit is Adj + Ctl) { + //default implementation, does not respect sparcity + let nCoefficients = Length(coefficients); mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients]; mutable allPositive = true; for (idx in 0 .. nCoefficients - 1) { - mutable coef = coefficients[idx]; - mutable ang = 0.0; - if (coef < 0.0) - { - set allPositive = false; - set coef = -coef; - set ang =Microsoft.Quantum.Math.PI(); - } + mutable coef = coefficients[idx]; + mutable ang = 0.0; + if (coef < 0.0) { + set allPositive = false; + set coef = -coef; + set ang = PI(); + } set coefficientsComplexPolar w/= idx<-ComplexPolar(coef,ang); } if (_CanApplyTwoQubitCase(coefficients)) { - return _ApplyTwoQubitCase(coefficients,_); - } - return NoisyPrepareArbitraryState(1E-12, coefficientsComplexPolar, _); //this is preparing the state almost exactly so far - } + return _ApplyTwoQubitCase(coefficients, _); + } + return ApproximatelyPrepareArbitraryState(1E-12, coefficientsComplexPolar, _); //this is preparing the state almost exactly so far + } } \ No newline at end of file diff --git a/MachineLearning/src/Runtime/SpecialMultiplexor.qs b/MachineLearning/src/Runtime/SpecialMultiplexor.qs deleted file mode 100644 index a95ea0f58ec..00000000000 --- a/MachineLearning/src/Runtime/SpecialMultiplexor.qs +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Canon { - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Math; - - /// # Summary - /// Applies a Pauli rotation conditioned on an array of qubits. - /// - /// This applies the multiply-controlled unitary operation $U$ that performs - /// rotations by angle $\theta_j$ about single-qubit Pauli operator $P$ - /// when controlled by the $n$-qubit number state $\ket{j}$. - /// - /// $U = \sum^{2^n-1}_{j=0}\ket{j}\bra{j}\otimes e^{i P \theta_j}$. - /// - /// # Input - /// ## tolerance - /// Coefficients under this tolerance level should be ignored - /// ## coefficients - /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient - /// indexes the number state $\ket{j}$ encoded in little-endian format. - /// - /// ## pauli - /// Pauli operator $P$ that determines axis of rotation. 
- /// - /// ## control - /// $n$-qubit control register that encodes number states $\ket{j}$ in - /// little-endian format. - /// - /// ## target - /// Single qubit register that is rotated by $e^{i P \theta_j}$. - /// - /// # Remarks - /// `coefficients` will be padded with elements $\theta_j = 0.0$ if - /// fewer than $2^n$ are specified. - operation NoisyMultiplexPauli (tolerance: Double,coefficients : Double[], pauli : Pauli, control : LittleEndian, target : Qubit) : Unit - { - body (...) - { - if (pauli == PauliZ) - { - let op = NoisyMultiplexZ(tolerance, coefficients, control, _); - op(target); - } - elif (pauli == PauliX) - { - let op = NoisyMultiplexPauli(tolerance,coefficients, PauliZ, control, _); - ApplyWithCA(H, op, target); - } - elif (pauli == PauliY) - { - let op = NoisyMultiplexPauli(tolerance,coefficients, PauliX, control, _); - ApplyWithCA(Adjoint S, op, target); - } - elif (pauli == PauliI) - { - NoisyApplyDiagonalUnitary(tolerance,coefficients, control); - } - else - { - fail $"MultiplexPauli failed. Invalid pauli {pauli}."; - } - } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; - } - - - function significantReal(tol: Double, rg:Double[]):Bool - { - for(j in 0..(Length(rg)-1)) - { - if (AbsD(rg[j])>tol) - { - return true; - } - } - return false; - } - - /// # Summary - /// Applies a Pauli Z rotation conditioned on an array of qubits. - /// - /// This applies the multiply-controlled unitary operation $U$ that performs - /// rotations by angle $\theta_j$ about single-qubit Pauli operator $Z$ - /// when controlled by the $n$-qubit number state $\ket{j}$. - /// - /// $U = \sum^{2^n-1}_{j=0}\ket{j}\bra{j}\otimes e^{i Z \theta_j}$. - /// - /// # Input - /// ## coefficients - /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient - /// indexes the number state $\ket{j}$ encoded in little-endian format. - /// - /// ## control - /// $n$-qubit control register that encodes number states $\ket{j}$ in - /// little-endian format. - /// - /// ## target - /// Single qubit register that is rotated by $e^{i P \theta_j}$. - /// - /// # Remarks - /// `coefficients` will be padded with elements $\theta_j = 0.0$ if - /// fewer than $2^n$ are specified. - /// - /// # References - /// - Synthesis of Quantum Logic Circuits - /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov - /// https://arxiv.org/abs/quant-ph/0406176 - operation NoisyMultiplexZ (tolerance: Double, coefficients : Double[], control : LittleEndian, target : Qubit) : Unit - { - body (...) - { - // pad coefficients length at tail to a power of 2. - let coefficientsPadded = Padded(-2 ^ Length(control!), 0.0, coefficients); - - if (Length(coefficientsPadded) == 1) - { - // Termination case - if (AbsD(coefficientsPadded[0])> tolerance) - { - Exp([PauliZ], coefficientsPadded[0], [target]); - } - } - else - { - // Compute new coefficients. - let (coefficients0, coefficients1) = specialMultiplexZComputeCoefficients_(coefficientsPadded); - NoisyMultiplexZ(tolerance,coefficients0, LittleEndian((control!)[0 .. Length(control!) - 2]), target); - if (significantReal(tolerance,coefficients1)) - { - CNOT((control!)[Length(control!) - 1], target); - NoisyMultiplexZ(tolerance,coefficients1, LittleEndian((control!)[0 .. Length(control!) - 2]), target); - CNOT((control!)[Length(control!) - 1], target); - } - } - } - - adjoint invert; - - controlled (controlRegister, ...) - { - // pad coefficients length to a power of 2. - let coefficientsPadded = Padded(2 ^ (Length(control!) 
+ 1), 0.0, Padded(-2 ^ Length(control!), 0.0, coefficients)); - let (coefficients0, coefficients1) = specialMultiplexZComputeCoefficients_(coefficientsPadded); - NoisyMultiplexZ(tolerance,coefficients0, control, target); - if (significantReal(tolerance,coefficients1)) - { - Controlled X(controlRegister, target); - NoisyMultiplexZ(tolerance,coefficients1, control, target); - Controlled X(controlRegister, target); - } - } - - controlled adjoint invert; - } - - - /// # Summary - /// Applies an array of complex phases to numeric basis states of a register of qubits. - /// - /// That is, this implements the diagonal unitary operation $U$ that applies a complex phase - /// $e^{i \theta_j}$ on the $n$-qubit number state $\ket{j}$. - /// - /// $U = \sum^{2^n-1}_{j=0}e^{i\theta_j}\ket{j}\bra{j}$. - /// - /// TODO: REIMPLEMENT THIS along the Welch et Bocharov lines - /// # Input - /// ## tolerance - /// Coefficients under this tolerance level should be ignored - /// ## coefficients - /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient - /// indexes the number state $\ket{j}$ encoded in little-endian format. - /// - /// ## control - /// $n$-qubit control register that encodes number states $\ket{j}$ in - /// little-endian format. - /// - /// # Remarks - /// `coefficients` will be padded with elements $\theta_j = 0.0$ if - /// fewer than $2^n$ are specified. - /// - /// # References - /// - Synthesis of Quantum Logic Circuits - /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov - /// https://arxiv.org/abs/quant-ph/0406176 - operation NoisyApplyDiagonalUnitary (tolerance: Double, coefficients : Double[], qubits : LittleEndian) : Unit - { - body (...) - { - if (IsEmpty(qubits!)) { - fail "operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0."; - } - - // pad coefficients length at tail to a power of 2. - let coefficientsPadded = Padded(-2 ^ Length(qubits!), 0.0, coefficients); - - // Compute new coefficients. - let (coefficients0, coefficients1) = specialMultiplexZComputeCoefficients_(coefficientsPadded); - NoisyMultiplexZ(tolerance,coefficients1, LittleEndian((qubits!)[0 .. Length(qubits!) - 2]), (qubits!)[Length(qubits!) - 1]); - - if (Length(coefficientsPadded) == 2) - { - - // Termination case - if (AbsD(coefficients0[0])>tolerance) - { - Exp([PauliI], 1.0 * coefficients0[0], qubits!); - } - } - else - { - NoisyApplyDiagonalUnitary(tolerance,coefficients0, LittleEndian((qubits!)[0 .. Length(qubits!) - 2])); - } - } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; - } - - - /// # Summary - /// Implementation step of multiply-controlled Z rotations. - /// # See Also - /// - Microsoft.Quantum.Canon.MultiplexZ - function specialMultiplexZComputeCoefficients_ (coefficients : Double[]) : (Double[], Double[]) - { - let newCoefficientsLength = Length(coefficients) / 2; - mutable coefficients0 = new Double[newCoefficientsLength]; - mutable coefficients1 = new Double[newCoefficientsLength]; - - for (idxCoeff in 0 .. 
newCoefficientsLength - 1) - { - set coefficients0 w/= idxCoeff <- 0.5 * (coefficients[idxCoeff] + coefficients[idxCoeff + newCoefficientsLength]); - set coefficients1 w/= idxCoeff <- 0.5 * (coefficients[idxCoeff] - coefficients[idxCoeff + newCoefficientsLength]); - } - - return (coefficients0, coefficients1); - } - - - - -} - - diff --git a/MachineLearning/src/Runtime/SpecialSP.qs b/MachineLearning/src/Runtime/SpecialSP.qs deleted file mode 100644 index 620e108feff..00000000000 --- a/MachineLearning/src/Runtime/SpecialSP.qs +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Arrays; - - // This library returns operations that prepare a specified quantum state - // from the computational basis state $\ket{0...0}$. - - /// # Summary - /// Returns an operation that prepares the given quantum state. - /// - /// The returned operation $U$ prepares an arbitrary quantum - /// state $\ket{\psi}$ with positive coefficients $\alpha_j\ge 0$ from - /// the $n$-qubit computational basis state $\ket{0...0}$. - /// - /// The action of U on a newly-allocated register is given by - /// $$ - /// \begin{align} - /// U \ket{0\cdots 0} = \ket{\psi} = \frac{\sum_{j=0}^{2^n-1}\alpha_j \ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|\alpha_j|^2}}. - /// \end{align} - /// $$ - /// - /// # Input - /// ## coefficients - /// Array of up to $2^n$ coefficients $\alpha_j$. The $j$th coefficient - /// indexes the number state $\ket{j}$ encoded in little-endian format. - /// - /// # Output - /// A state-preparation unitary operation $U$. - /// - /// # Remarks - /// Negative input coefficients $\alpha_j < 0$ will be treated as though - /// positive with value $|\alpha_j|$. `coefficients` will be padded with - /// elements $\alpha_j = 0.0$ if fewer than $2^n$ are specified. - /// - /// ## Example - /// The following snippet prepares the quantum state $\ket{\psi}=\sqrt{1/8}\ket{0}+\sqrt{7/8}\ket{2}$ - /// in the qubit register `qubitsLE`. - /// ```qsharp - /// let amplitudes = [Sqrt(0.125), 0.0, Sqrt(0.875), 0.0]; - /// let op = StatePreparationPositiveCoefficients(amplitudes); - /// using (qubits = Qubit[2]) { - /// let qubitsLE = LittleEndian(qubits); - /// op(qubitsLE); - /// } - /// ``` - function NoisyStatePreparationPositiveCoefficients (tolerance: Double, coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { - let nCoefficients = Length(coefficients); - mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients]; - for (idx in 0 .. nCoefficients - 1) { - set coefficientsComplexPolar w/= idx <- ComplexPolar(AbsD(coefficients[idx]), 0.0); - } - return NoisyPrepareArbitraryState(tolerance, coefficientsComplexPolar, _); - } - - /// # Summary - /// Returns an operation that prepares a specific quantum state. - /// - /// The returned operation $U$ prepares an arbitrary quantum - /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from - /// the $n$-qubit computational basis state $\ket{0...0}$. - /// - /// The action of U on a newly-allocated register is given by - /// $$ - /// \begin{align} - /// U\ket{0...0}=\ket{\psi}=\frac{\sum_{j=0}^{2^n-1}r_j e^{i t_j}\ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|r_j|^2}}. 
- /// \end{align} - /// $$ - /// - /// # Input - /// ## coefficients - /// Array of up to $2^n$ complex coefficients represented by their - /// absolute value and phase $(r_j, t_j)$. The $j$th coefficient - /// indexes the number state $\ket{j}$ encoded in little-endian format. - /// - /// # Output - /// A state-preparation unitary operation $U$. - /// - /// # Remarks - /// Negative input coefficients $r_j < 0$ will be treated as though - /// positive with value $|r_j|$. `coefficients` will be padded with - /// elements $(r_j, t_j) = (0.0, 0.0)$ if fewer than $2^n$ are - /// specified. - /// - /// ## Example - /// The following snippet prepares the quantum state $\ket{\psi}=e^{i 0.1}\sqrt{1/8}\ket{0}+\sqrt{7/8}\ket{2}$ - /// in the qubit register `qubitsLE`. - /// ```qsharp - /// let amplitudes = [Sqrt(0.125), 0.0, Sqrt(0.875), 0.0]; - /// let phases = [0.1, 0.0, 0.0, 0.0]; - /// mutable complexNumbers = new ComplexPolar[4]; - /// for (idx in 0..3) { - /// set complexNumbers[idx] = ComplexPolar(amplitudes[idx], phases[idx]); - /// } - /// let op = StatePreparationComplexCoefficients(complexNumbers); - /// using (qubits = Qubit[2]) { - /// let qubitsLE = LittleEndian(qubits); - /// op(qubitsLE); - /// } - /// ``` - function NoisyStatePreparationComplexCoefficients (tolerance: Double, coefficients : ComplexPolar[]) : (LittleEndian => Unit is Adj + Ctl) { - return NoisyPrepareArbitraryState(tolerance, coefficients, _); - } - - /// # Summary - /// Returns an operation that prepares a given quantum state. - /// - /// The returned operation $U$ prepares an arbitrary quantum - /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from - /// the $n$-qubit computational basis state $\ket{0...0}$. - /// - /// $$ - /// \begin{align} - /// U\ket{0...0}=\ket{\psi}=\frac{\sum_{j=0}^{2^n-1}r_j e^{i t_j}\ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|r_j|^2}}. - /// \end{align} - /// $$ - /// - /// # Input - /// ## coefficients - /// Array of up to $2^n$ complex coefficients represented by their - /// absolute value and phase $(r_j, t_j)$. The $j$th coefficient - /// indexes the number state $\ket{j}$ encoded in little-endian format. - /// - /// ## qubits - /// Qubit register encoding number states in little-endian format. This is - /// expected to be initialized in the computational basis state - /// $\ket{0...0}$. - /// - /// # Remarks - /// Negative input coefficients $r_j < 0$ will be treated as though - /// positive with value $|r_j|$. `coefficients` will be padded with - /// elements $(r_j, t_j) = (0.0, 0.0)$ if fewer than $2^n$ are - /// specified. - /// - /// # References - /// - Synthesis of Quantum Logic Circuits - /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov - /// https://arxiv.org/abs/quant-ph/0406176 - operation NoisyPrepareArbitraryState (tolerance:Double, coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { - // pad coefficients at tail length to a power of 2. - let coefficientsPadded = Padded(-2 ^ Length(qubits!), ComplexPolar(0.0, 0.0), coefficients); - let target = (qubits!)[0]; - let op = (Adjoint _NoisyPrepareArbitraryState(tolerance,coefficientsPadded, _, _))(_, target); - op( - // Determine what controls to apply to `op`. - Length(qubits!) > 1 - ? LittleEndian((qubits!)[1 .. Length(qubits!) 
- 1]) - | LittleEndian(new Qubit[0]) - ); - } - - - function significantComplex(tol: Double, rg:ComplexPolar[]):Bool { - for (j in 0..(Length(rg)-1)) { - if (AbsComplexPolar(rg[j])>tol) { - return true; - } - } - return false; - } - - /// # Summary - /// Implementation step of arbitrary state preparation procedure. - /// - /// # See Also - /// - PrepareArbitraryState - /// - Microsoft.Quantum.Canon.MultiplexPauli - operation _NoisyPrepareArbitraryState(tolerance: Double, coefficients : ComplexPolar[], control : LittleEndian, target : Qubit) : Unit is Adj + Ctl { - // For each 2D block, compute disentangling single-qubit rotation parameters - let (disentanglingY, disentanglingZ, newCoefficients) = _NoisyStatePreparationSBMComputeCoefficients(coefficients); - if (significantReal(tolerance,disentanglingZ)) { - NoisyMultiplexPauli(tolerance,disentanglingZ, PauliZ, control, target); - } - if (significantReal(tolerance,disentanglingY)) { - NoisyMultiplexPauli(tolerance,disentanglingY, PauliY, control, target); - } - // target is now in |0> state up to the phase given by arg of newCoefficients. - - // Continue recursion while there are control qubits. - if (Length(control!) == 0) { - let (abs, arg) = newCoefficients[0]!; - if (AbsD(arg)> tolerance) - { - Exp([PauliI], -1.0 * arg, [target]); - } - } else { - if (significantComplex(tolerance,newCoefficients)) { - let newControl = LittleEndian((control!)[1 .. Length(control!) - 1]); - let newTarget = (control!)[0]; - _NoisyPrepareArbitraryState(tolerance,newCoefficients, newControl, newTarget); - } - } - } - - /// # Summary - /// Computes the Bloch sphere coordinates for a single-qubit state. - /// - /// Given two complex numbers $a0, a1$ that represent the qubit state, computes coordinates - /// on the Bloch sphere such that - /// $a0 \ket{0} + a1 \ket{1} = r e^{it}(e^{-i \phi /2}\cos{(\theta/2)}\ket{0}+e^{i \phi /2}\sin{(\theta/2)}\ket{1})$. - /// - /// # Input - /// ## a0 - /// Complex coefficient of state $\ket{0}$. - /// ## a1 - /// Complex coefficient of state $\ket{1}$. - /// - /// # Output - /// A tuple containing `(ComplexPolar(r, t), phi, theta)`. - function NoisyBlochSphereCoordinates (a0 : ComplexPolar, a1 : ComplexPolar) : (ComplexPolar, Double, Double) { - let abs0 = AbsComplexPolar(a0); - let abs1 = AbsComplexPolar(a1); - let arg0 = ArgComplexPolar(a0); - let arg1 = ArgComplexPolar(a1); - let r = Sqrt(abs0 * abs0 + abs1 * abs1); - let t = 0.5 * (arg0 + arg1); - let phi = arg1 - arg0; - let theta = 2.0 * ArcTan2(abs1, abs0); - return (ComplexPolar(r, t), phi, theta); - } - - /// # Summary - /// Implementation step of arbitrary state preparation procedure. - /// # See Also - /// - Microsoft.Quantum.Canon.PrepareArbitraryState - function _NoisyStatePreparationSBMComputeCoefficients (coefficients : ComplexPolar[]) : (Double[], Double[], ComplexPolar[]) { - mutable disentanglingZ = new Double[Length(coefficients) / 2]; - mutable disentanglingY = new Double[Length(coefficients) / 2]; - mutable newCoefficients = new ComplexPolar[Length(coefficients) / 2]; - for (idxCoeff in 0 .. 2 .. 
Length(coefficients) - 1) { - let (rt, phi, theta) = NoisyBlochSphereCoordinates(coefficients[idxCoeff], coefficients[idxCoeff + 1]); - set disentanglingZ w/= idxCoeff / 2 <- 0.5 * phi; - set disentanglingY w/= idxCoeff / 2 <- 0.5 * theta; - set newCoefficients w/= idxCoeff / 2 <- rt; - } - return (disentanglingY, disentanglingZ, newCoefficients); - } -} \ No newline at end of file diff --git a/Standard/src/Canon/Utils/Multiplexer.qs b/Standard/src/Canon/Utils/Multiplexer.qs index f6cfce0f835..0b4a2901e31 100644 --- a/Standard/src/Canon/Utils/Multiplexer.qs +++ b/Standard/src/Canon/Utils/Multiplexer.qs @@ -8,13 +8,20 @@ namespace Microsoft.Quantum.Canon { open Microsoft.Quantum.Math; /// # Summary - /// Applies a Pauli rotation conditioned on an array of qubits. - /// - /// This applies the multiply-controlled unitary operation $U$ that performs + /// Applies a Pauli rotation conditioned on an array of qubits. + /// + /// # Description + /// This applies a multiply controlled unitary operation that performs /// rotations by angle $\theta_j$ about single-qubit Pauli operator $P$ /// when controlled by the $n$-qubit number state $\ket{j}$. + /// In particular, the action of this operation is represented by the + /// unitary /// - /// $U = \sum^{2^n-1}_{j=0}\ket{j}\bra{j}\otimes e^{i P \theta_j}$. + /// $$ + /// \begin{align} + /// U = \sum^{2^n - 1}_{j=0} \ket{j}\bra{j} \otimes e^{i P \theta_j}. + /// \end{align} + /// ## /// /// # Input /// ## coefficients @@ -34,51 +41,157 @@ namespace Microsoft.Quantum.Canon { /// # Remarks /// `coefficients` will be padded with elements $\theta_j = 0.0$ if /// fewer than $2^n$ are specified. - operation MultiplexPauli (coefficients : Double[], pauli : Pauli, control : LittleEndian, target : Qubit) : Unit - { - body (...) - { - if (pauli == PauliZ) - { - let op = MultiplexZ(coefficients, control, _); - op(target); - } - elif (pauli == PauliX) - { - let op = MultiplexPauli(coefficients, PauliZ, control, _); - ApplyWithCA(H, op, target); - } - elif (pauli == PauliY) - { - let op = MultiplexPauli(coefficients, PauliX, control, _); - ApplyWithCA(Adjoint S, op, target); - } - elif (pauli == PauliI) - { - ApplyDiagonalUnitary(coefficients, control); + /// + /// # See Also + /// - ApproximatelyMultiplexPauli + operation MultiplexPauli(coefficients : Double[], pauli : Pauli, control : LittleEndian, target : Qubit) + : Unit is Adj + Ctl { + ApproximatelyMultiplexPauli(0.0, coefficients, pauli, control, target); + } + + /// # Summary + /// Applies a Pauli rotation conditioned on an array of qubits, truncating + /// small rotation angles according to a given tolerance. + /// + /// # Description + /// This applies a multiply controlled unitary operation that performs + /// rotations by angle $\theta_j$ about single-qubit Pauli operator $P$ + /// when controlled by the $n$-qubit number state $\ket{j}$. + /// In particular, the action of this operation is represented by the + /// unitary + /// + /// $$ + /// \begin{align} + /// U = \sum^{2^n - 1}_{j=0} \ket{j}\bra{j} \otimes e^{i P \theta_j}. + /// \end{align} + /// ## + /// + /// # Input + /// ## tolerance + /// A tolerance below which small coefficients are truncated. + /// + /// ## coefficients + /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## pauli + /// Pauli operator $P$ that determines axis of rotation. 
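For reference, a minimal usage sketch of the tolerance-taking multiplexer follows. The operation name, register size, angle values, and tolerance below are illustrative assumptions only; the snippet assumes the Microsoft.Quantum.Arithmetic, Microsoft.Quantum.Arrays, and Microsoft.Quantum.Canon namespaces are opened.

```qsharp
// Minimal usage sketch (illustrative values; `qs` is assumed to hold three qubits,
// two controls followed by one target).
operation ApplyExampleMultiplexedRotation(qs : Qubit[]) : Unit is Adj + Ctl {
    // The second angle is below the tolerance of 1E-4, so no rotation is emitted
    // for it; the remaining angles are applied as multiplexed Z rotations.
    let angles = [0.5, 1E-7, 0.25, 0.0];
    ApproximatelyMultiplexPauli(1E-4, angles, PauliZ, LittleEndian(Most(qs)), Tail(qs));
}
```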
+ /// + /// ## control + /// $n$-qubit control register that encodes number states $\ket{j}$ in + /// little-endian format. + /// + /// ## target + /// Single qubit register that is rotated by $e^{i P \theta_j}$. + /// + /// # Remarks + /// `coefficients` will be padded with elements $\theta_j = 0.0$ if + /// fewer than $2^n$ are specified. + /// + /// # See Also + /// - MultiplexPauli + operation ApproximatelyMultiplexPauli(tolerance : Double, coefficients : Double[], pauli : Pauli, control : LittleEndian, target : Qubit) + : Unit is Adj + Ctl { + if (pauli == PauliZ) { + let op = ApproximatelyMultiplexZ(tolerance, coefficients, control, _); + op(target); + } elif (pauli == PauliX) { + let op = ApproximatelyMultiplexPauli(tolerance, coefficients, PauliZ, control, _); + ApplyWithCA(H, op, target); + } elif (pauli == PauliY) { + let op = ApproximatelyMultiplexPauli(tolerance, coefficients, PauliX, control, _); + ApplyWithCA(Adjoint S, op, target); + } elif (pauli == PauliI) { + ApproximatelyApplyDiagonalUnitary(tolerance, coefficients, control); + } else { + fail $"MultiplexPauli failed. Invalid pauli {pauli}."; + } + } + + /// # Summary + /// Applies a Pauli Z rotation conditioned on an array of qubits. + /// + /// # Description + /// This applies the multiply controlled unitary operation that performs + /// rotations by angle $\theta_j$ about single-qubit Pauli operator $Z$ + /// when controlled by the $n$-qubit number state $\ket{j}$. + /// In particular, this operation can be represented by the unitary + /// + /// $$ + /// \begin{align} + /// U = \sum^{2^n-1}_{j=0} \ket{j}\bra{j} \otimes e^{i Z \theta_j}. + /// \end{align} + /// $$ + /// + /// # Input + /// ## coefficients + /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## control + /// $n$-qubit control register that encodes number states $\ket{j}$ in + /// little-endian format. + /// + /// ## target + /// Single qubit register that is rotated by $e^{i P \theta_j}$. + /// + /// # Remarks + /// `coefficients` will be padded with elements $\theta_j = 0.0$ if + /// fewer than $2^n$ are specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + /// + /// # See Also + /// - ApproximatelyMultiplexZ + operation MultiplexZ(coefficients : Double[], control : LittleEndian, target : Qubit) + : Unit is Adj + Ctl { + ApproximatelyMultiplexZ(0.0, coefficients, control, target); + } + + function _AnyOutsideToleranceD(tolerance : Double, coefficients : Double[]) : Bool { + // NB: We don't currently use Any / Mapped for this, as we want to be + // able to short-circuit. That should be implied by immutable + // semantics, but that's not yet the case. + for (coefficient in coefficients) { + if (AbsD(coefficient) >= tolerance) { + return true; } - else - { - fail $"MultiplexPauli failed. Invalid pauli {pauli}."; + } + return false; + } + + function _AnyOutsideToleranceCP(tolerance : Double, coefficients : ComplexPolar[]) : Bool { + for (coefficient in coefficients) { + if (AbsComplexPolar(coefficient) > tolerance) { + return true; } } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; + return false; } - - + /// # Summary - /// Applies a Pauli Z rotation conditioned on an array of qubits. 
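The reduction of the `PauliX` and `PauliY` cases to the `PauliZ` case in `ApproximatelyMultiplexPauli` rests on the basis-change identities

$$
\begin{align}
    e^{i \theta X} = H \, e^{i \theta Z} \, H,
    \qquad
    e^{i \theta Y} = S \, H \, e^{i \theta Z} \, H \, S^\dagger,
\end{align}
$$

so conjugating the multiplexed $Z$ rotation by $H$ (and additionally by $S$, via `ApplyWithCA`) produces the multiplexed $X$ and $Y$ rotations, while the `PauliI` case applies the phases $e^{i\theta_j}$ directly to the control register through `ApproximatelyApplyDiagonalUnitary`.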
- /// - /// This applies the multiply-controlled unitary operation $U$ that performs + /// Applies a Pauli Z rotation conditioned on an array of qubits, truncating + /// small rotation angles according to a given tolerance. + /// + /// # Description + /// This applies the multiply controlled unitary operation that performs /// rotations by angle $\theta_j$ about single-qubit Pauli operator $Z$ /// when controlled by the $n$-qubit number state $\ket{j}$. + /// In particular, this operation can be represented by the unitary /// - /// $U = \sum^{2^n-1}_{j=0}\ket{j}\bra{j}\otimes e^{i Z \theta_j}$. + /// $$ + /// \begin{align} + /// U = \sum^{2^n-1}_{j=0} \ket{j}\bra{j} \otimes e^{i Z \theta_j}. + /// \end{align} + /// $$ /// /// # Input + /// ## tolerance + /// A tolerance below which small coefficients are truncated. + /// /// ## coefficients /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient /// indexes the number state $\ket{j}$ encoded in little-endian format. @@ -98,53 +211,62 @@ namespace Microsoft.Quantum.Canon { /// - Synthesis of Quantum Logic Circuits /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov /// https://arxiv.org/abs/quant-ph/0406176 - operation MultiplexZ (coefficients : Double[], control : LittleEndian, target : Qubit) : Unit - { - body (...) - { + /// + /// # See Also + /// - MultiplexZ + operation ApproximatelyMultiplexZ(tolerance : Double, coefficients : Double[], control : LittleEndian, target : Qubit) : Unit is Adj + Ctl { + body (...) { // pad coefficients length at tail to a power of 2. let coefficientsPadded = Padded(-2 ^ Length(control!), 0.0, coefficients); - - if (Length(coefficientsPadded) == 1) - { + + if (Length(coefficientsPadded) == 1) { // Termination case - Exp([PauliZ], coefficientsPadded[0], [target]); - } - else - { + if (AbsD(coefficientsPadded[0]) > tolerance) { + Exp([PauliZ], coefficientsPadded[0], [target]); + } + } else { // Compute new coefficients. - let (coefficients0, coefficients1) = MultiplexZComputeCoefficients_(coefficientsPadded); - MultiplexZ(coefficients0, LittleEndian((control!)[0 .. Length(control!) - 2]), target); - CNOT((control!)[Length(control!) - 1], target); - MultiplexZ(coefficients1, LittleEndian((control!)[0 .. Length(control!) - 2]), target); - CNOT((control!)[Length(control!) - 1], target); + let (coefficients0, coefficients1) = _MultiplexZCoefficients(coefficientsPadded); + ApproximatelyMultiplexZ(tolerance,coefficients0, LittleEndian((control!)[0 .. Length(control!) - 2]), target); + if (_AnyOutsideToleranceD(tolerance, coefficients1)) { + within { + CNOT((control!)[Length(control!) - 1], target); + } apply { + ApproximatelyMultiplexZ(tolerance,coefficients1, LittleEndian((control!)[0 .. Length(control!) - 2]), target); + } + } } } - - adjoint invert; - - controlled (controlRegister, ...) - { + + controlled (controlRegister, ...) { // pad coefficients length to a power of 2. let coefficientsPadded = Padded(2 ^ (Length(control!) 
+ 1), 0.0, Padded(-2 ^ Length(control!), 0.0, coefficients)); - let (coefficients0, coefficients1) = MultiplexZComputeCoefficients_(coefficientsPadded); - MultiplexZ(coefficients0, control, target); - Controlled X(controlRegister, target); - MultiplexZ(coefficients1, control, target); - Controlled X(controlRegister, target); + let (coefficients0, coefficients1) = _MultiplexZCoefficients(coefficientsPadded); + ApproximatelyMultiplexZ(tolerance,coefficients0, control, target); + if (_AnyOutsideToleranceD(tolerance,coefficients1)) { + within { + Controlled X(controlRegister, target); + } apply { + ApproximatelyMultiplexZ(tolerance,coefficients1, control, target); + } + } } - - controlled adjoint invert; } - - + /// # Summary - /// Applies an array of complex phases to numeric basis states of a register of qubits. - /// - /// That is, this implements the diagonal unitary operation $U$ that applies a complex phase + /// Applies an array of complex phases to numeric basis states of a register + /// of qubits. + /// + /// # Description + /// This operation implements a diagonal unitary that applies a complex phase /// $e^{i \theta_j}$ on the $n$-qubit number state $\ket{j}$. + /// In particular, this operation can be represented by the unitary /// - /// $U = \sum^{2^n-1}_{j=0}e^{i\theta_j}\ket{j}\bra{j}$. + /// $$ + /// \begin{align} + /// U = \sum^{2^n-1}_{j=0}e^{i\theta_j}\ket{j}\bra{j}. + /// \end{align} + /// $$ /// /// # Input /// ## coefficients @@ -163,62 +285,95 @@ namespace Microsoft.Quantum.Canon { /// - Synthesis of Quantum Logic Circuits /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov /// https://arxiv.org/abs/quant-ph/0406176 - operation ApplyDiagonalUnitary (coefficients : Double[], qubits : LittleEndian) : Unit - { - body (...) - { - if (Length(qubits!) == 0) - { - fail $"operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0."; - } - - // pad coefficients length at tail to a power of 2. - let coefficientsPadded = Padded(-2 ^ Length(qubits!), 0.0, coefficients); - - // Compute new coefficients. - let (coefficients0, coefficients1) = MultiplexZComputeCoefficients_(coefficientsPadded); - MultiplexZ(coefficients1, LittleEndian((qubits!)[0 .. Length(qubits!) - 2]), (qubits!)[Length(qubits!) - 1]); - - if (Length(coefficientsPadded) == 2) - { - // Termination case + /// + /// # See Also + /// - ApproximatelyApplyDiagonalUnitary + operation ApplyDiagonalUnitary(coefficients : Double[], qubits : LittleEndian) : Unit is Adj + Ctl { + ApproximatelyApplyDiagonalUnitary(0.0, coefficients, qubits); + } + + /// # Summary + /// Applies an array of complex phases to numeric basis states of a register + /// of qubits, truncating small rotation angles according to a given + /// tolerance. + /// + /// # Description + /// This operation implements a diagonal unitary that applies a complex phase + /// $e^{i \theta_j}$ on the $n$-qubit number state $\ket{j}$. + /// In particular, this operation can be represented by the unitary + /// + /// $$ + /// \begin{align} + /// U = \sum^{2^n-1}_{j=0}e^{i\theta_j}\ket{j}\bra{j}. + /// \end{align} + /// $$ + /// + /// # Input + /// ## tolerance + /// A tolerance below which small coefficients are truncated. + /// + /// ## coefficients + /// Array of up to $2^n$ coefficients $\theta_j$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## control + /// $n$-qubit control register that encodes number states $\ket{j}$ in + /// little-endian format. 
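The recursion implemented by `ApproximatelyMultiplexZ` together with `_MultiplexZCoefficients` is easiest to see in the single-control case. Writing $\theta_\pm = \tfrac{1}{2}(\theta_0 \pm \theta_1)$,

$$
\begin{align}
    \ket{0}\bra{0} \otimes e^{i \theta_0 Z} + \ket{1}\bra{1} \otimes e^{i \theta_1 Z}
    = \left(\mathbf{1} \otimes e^{i \theta_+ Z}\right)
      \, \mathrm{CNOT} \,
      \left(\mathbf{1} \otimes e^{i \theta_- Z}\right)
      \, \mathrm{CNOT}.
\end{align}
$$

Each level of the recursion therefore halves the coefficient array, rotating by the averages $\theta_+$ on the remaining controls and by the differences $\theta_-$ inside a CNOT conjugation; when every difference falls below the tolerance, `_AnyOutsideToleranceD` lets the implementation skip the CNOT pair and the inner recursion altogether.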
+ /// + /// # Remarks + /// `coefficients` will be padded with elements $\theta_j = 0.0$ if + /// fewer than $2^n$ are specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + /// + /// # See Also + /// - ApplyDiagonalUnitary + operation ApproximatelyApplyDiagonalUnitary(tolerance : Double, coefficients : Double[], qubits : LittleEndian) + : Unit is Adj + Ctl { + if (IsEmpty(qubits!)) { + fail "operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0."; + } + + // pad coefficients length at tail to a power of 2. + let coefficientsPadded = Padded(-2 ^ Length(qubits!), 0.0, coefficients); + + // Compute new coefficients. + let (coefficients0, coefficients1) = _MultiplexZCoefficients(coefficientsPadded); + ApproximatelyMultiplexZ(tolerance,coefficients1, LittleEndian((qubits!)[0 .. Length(qubits!) - 2]), (qubits!)[Length(qubits!) - 1]); + + if (Length(coefficientsPadded) == 2) { + // Termination case + if (AbsD(coefficients0[0]) > tolerance) { Exp([PauliI], 1.0 * coefficients0[0], qubits!); } - else - { - ApplyDiagonalUnitary(coefficients0, LittleEndian((qubits!)[0 .. Length(qubits!) - 2])); - } + } else { + ApproximatelyApplyDiagonalUnitary(tolerance, coefficients0, LittleEndian(Most(qubits!))); } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; } - - + /// # Summary /// Implementation step of multiply-controlled Z rotations. /// # See Also /// - Microsoft.Quantum.Canon.MultiplexZ - function MultiplexZComputeCoefficients_ (coefficients : Double[]) : (Double[], Double[]) - { + function _MultiplexZCoefficients(coefficients : Double[]) : (Double[], Double[]) { let newCoefficientsLength = Length(coefficients) / 2; mutable coefficients0 = new Double[newCoefficientsLength]; mutable coefficients1 = new Double[newCoefficientsLength]; - - for (idxCoeff in 0 .. newCoefficientsLength - 1) - { + + for (idxCoeff in 0 .. newCoefficientsLength - 1) { set coefficients0 w/= idxCoeff <- 0.5 * (coefficients[idxCoeff] + coefficients[idxCoeff + newCoefficientsLength]); set coefficients1 w/= idxCoeff <- 0.5 * (coefficients[idxCoeff] - coefficients[idxCoeff + newCoefficientsLength]); } - + return (coefficients0, coefficients1); } - - + /// # Summary - /// Applies an array of operations controlled by an array of number states. - /// + /// Applies an array of operations controlled by an array of number states. + /// /// That is, applies Multiply-controlled unitary operation $U$ that applies a /// unitary $V_j$ when controlled by $n$-qubit number state $\ket{j}$. /// @@ -239,42 +394,36 @@ namespace Microsoft.Quantum.Canon { /// # Remarks /// `coefficients` will be padded with identity elements if /// fewer than $2^n$ are specified. This implementation uses - /// $n-1$ ancilla qubits. + /// $n - 1$ auxillary qubits. /// /// # References /// - Toward the first quantum simulation with quantum speedup /// Andrew M. Childs, Dmitri Maslov, Yunseong Nam, Neil J. Ross, Yuan Su /// https://arxiv.org/abs/1711.10980 - operation MultiplexOperations<'T> (unitaries : ('T => Unit is Adj + Ctl)[], index : LittleEndian, target : 'T) : Unit - { - body (...) - { - if (Length(index!) == 0) - { - fail $"MultiplexOperations failed. 
Number of index qubits must be greater than 0."; - } - - if (Length(unitaries) > 0) - { - let ancilla = new Qubit[0]; - _MultiplexOperations(unitaries, ancilla, index, target); - } + operation MultiplexOperations<'T> (unitaries : ('T => Unit is Adj + Ctl)[], index : LittleEndian, target : 'T) + : Unit is Adj + Ctl { + if (Length(index!) == 0) { + fail $"MultiplexOperations failed. Number of index qubits must be greater than 0."; + } + + if (Length(unitaries) > 0) { + let auxillaryRegister = new Qubit[0]; + _MultiplexOperations(unitaries, auxillaryRegister, index, target); } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; } - - + /// # Summary /// Implementation step of MultiplexOperations. /// # See Also /// - Microsoft.Quantum.Canon.MultiplexOperations - operation _MultiplexOperations<'T>(unitaries : ('T => Unit is Adj + Ctl)[], ancilla : Qubit[], index : LittleEndian, target : 'T) : Unit - { - body (...) - { + operation _MultiplexOperations<'T>( + unitaries : ('T => Unit is Adj + Ctl)[], + auxillaryRegister : Qubit[], + index : LittleEndian, + target : 'T + ) + : Unit is Adj + Ctl { + body (...) { let nIndex = Length(index!); let nStates = 2 ^ nIndex; let nUnitaries = Length(unitaries); @@ -283,59 +432,48 @@ namespace Microsoft.Quantum.Canon { let rightUnitaries = unitaries[0 .. nUnitariesRight - 1]; let leftUnitaries = unitaries[nUnitariesRight .. nUnitariesLeft - 1]; let newControls = LittleEndian((index!)[0 .. nIndex - 2]); - - if (nUnitaries > 0) - { - if (Length(ancilla) == 1 and nIndex == 0) - { + + if (nUnitaries > 0) { + if (Length(auxillaryRegister) == 1 and nIndex == 0) { // Termination case - Controlled unitaries[0](ancilla, target); - } - elif (Length(ancilla) == 0 and nIndex >= 1) - { + Controlled unitaries[0](auxillaryRegister, target); + } elif (Length(auxillaryRegister) == 0 and nIndex >= 1) { // Start case - let newAncilla = [(index!)[Length(index!) - 1]]; - - if (nUnitariesLeft > 0) - { - _MultiplexOperations(leftUnitaries, newAncilla, newControls, target); + let newAuxQubit = Tail(index!); + + if (nUnitariesLeft > 0) { + _MultiplexOperations(leftUnitaries, [newAuxQubit], newControls, target); } - - X(newAncilla[0]); - _MultiplexOperations(rightUnitaries, newAncilla, newControls, target); - X(newAncilla[0]); - } - else - { - // Recursion that reduces nIndex by 1 & sets Length(ancilla) to 1. - using (newAncilla = Qubit[1]) - { - Controlled X(ancilla + [(index!)[Length(index!) - 1]], newAncilla[0]); - - if (nUnitariesLeft > 0) - { - _MultiplexOperations(leftUnitaries, newAncilla, newControls, target); + + within { + X(newAuxQubit); + } apply { + _MultiplexOperations(rightUnitaries, [newAuxQubit], newControls, target); + } + } else { + // Recursion that reduces nIndex by 1 & sets Length(auxillaryRegister) to 1. + using (newAuxQubit = Qubit()) { + within { + Controlled X(auxillaryRegister + [(index!)[Length(index!) - 1]], newAuxQubit); + } apply { + if (nUnitariesLeft > 0) { + _MultiplexOperations(leftUnitaries, [newAuxQubit], newControls, target); + } + + within { + Controlled X(auxillaryRegister, newAuxQubit); + } apply { + _MultiplexOperations(rightUnitaries, [newAuxQubit], newControls, target); + } } - - Controlled X(ancilla, newAncilla[0]); - _MultiplexOperations(rightUnitaries, newAncilla, newControls, target); - Controlled X(ancilla, newAncilla[0]); - Controlled X(ancilla + [(index!)[Length(index!) - 1]], newAncilla[0]); } } } } - - adjoint invert; - - controlled (controlRegister, ...) - { + + controlled (controlRegister, ...) 
{ _MultiplexOperations(unitaries, controlRegister, index, target); } - - controlled adjoint invert; } - -} - +} diff --git a/Standard/src/Preparation/StatePreparation.qs b/Standard/src/Preparation/Arbitrary.qs similarity index 82% rename from Standard/src/Preparation/StatePreparation.qs rename to Standard/src/Preparation/Arbitrary.qs index ec3f9a6d845..8b12642d855 100644 --- a/Standard/src/Preparation/StatePreparation.qs +++ b/Standard/src/Preparation/Arbitrary.qs @@ -12,8 +12,8 @@ namespace Microsoft.Quantum.Preparation { // from the computational basis state $\ket{0...0}$. /// # Summary - /// Returns an operation that prepares the given quantum state. - /// + /// Returns an operation that prepares the given quantum state. + /// /// The returned operation $U$ prepares an arbitrary quantum /// state $\ket{\psi}$ with positive coefficients $\alpha_j\ge 0$ from /// the $n$-qubit computational basis state $\ket{0...0}$. @@ -61,8 +61,8 @@ namespace Microsoft.Quantum.Preparation { } /// # Summary - /// Returns an operation that prepares a specific quantum state. - /// + /// Returns an operation that prepares a specific quantum state. + /// /// The returned operation $U$ prepares an arbitrary quantum /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from /// the $n$-qubit computational basis state $\ket{0...0}$. @@ -108,11 +108,11 @@ namespace Microsoft.Quantum.Preparation { function StatePreparationComplexCoefficients (coefficients : ComplexPolar[]) : (LittleEndian => Unit is Adj + Ctl) { return PrepareArbitraryState(coefficients, _); } - - + + /// # Summary - /// Returns an operation that prepares a given quantum state. - /// + /// Returns an operation that prepares a given quantum state. + /// /// The returned operation $U$ prepares an arbitrary quantum /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from /// the $n$-qubit computational basis state $\ket{0...0}$. @@ -144,12 +144,16 @@ namespace Microsoft.Quantum.Preparation { /// - Synthesis of Quantum Logic Circuits /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov /// https://arxiv.org/abs/quant-ph/0406176 - operation PrepareArbitraryState (coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { + operation PrepareArbitraryState(coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { + ApproximatelyPrepareArbitraryState(0.0, coefficients, qubits); + } + + /// TODO + operation ApproximatelyPrepareArbitraryState(tolerance : Double, coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { // pad coefficients at tail length to a power of 2. let coefficientsPadded = Padded(-2 ^ Length(qubits!), ComplexPolar(0.0, 0.0), coefficients); let target = (qubits!)[0]; - let op = (Adjoint _PrepareArbitraryState(coefficientsPadded, _, _))(_, target); - + let op = (Adjoint _ApproximatelyPrepareArbitraryState(tolerance, coefficientsPadded, _, _))(_, target); op( // Determine what controls to apply to `op`. Length(qubits!) > 1 @@ -158,40 +162,43 @@ namespace Microsoft.Quantum.Preparation { ); } - /// # Summary /// Implementation step of arbitrary state preparation procedure. 
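Since `ApproximatelyPrepareArbitraryState` above is currently documented only with a TODO, a minimal usage sketch in the style of the existing state-preparation examples may help; the operation name, amplitudes, and tolerance are illustrative assumptions.

```qsharp
// Approximately prepare sqrt(1/8)|0> + sqrt(7/8)|2> on two qubits, ignoring any
// disentangling rotation angles smaller than 1E-4 (illustrative values only).
operation PrepareExampleState() : Unit {
    let amplitudes = [
        ComplexPolar(Sqrt(0.125), 0.0), ComplexPolar(0.0, 0.0),
        ComplexPolar(Sqrt(0.875), 0.0), ComplexPolar(0.0, 0.0)
    ];
    using (qubits = Qubit[2]) {
        ApproximatelyPrepareArbitraryState(1E-4, amplitudes, LittleEndian(qubits));
        ResetAll(qubits);
    }
}
```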
/// /// # See Also /// - PrepareArbitraryState /// - Microsoft.Quantum.Canon.MultiplexPauli - operation _PrepareArbitraryState(coefficients : ComplexPolar[], control : LittleEndian, target : Qubit) : Unit is Adj + Ctl - { + operation _ApproximatelyPrepareArbitraryState(tolerance: Double, coefficients : ComplexPolar[], control : LittleEndian, target : Qubit) + : Unit is Adj + Ctl { // For each 2D block, compute disentangling single-qubit rotation parameters let (disentanglingY, disentanglingZ, newCoefficients) = _StatePreparationSBMComputeCoefficients(coefficients); - MultiplexPauli(disentanglingZ, PauliZ, control, target); - MultiplexPauli(disentanglingY, PauliY, control, target); - + if (_AnyOutsideToleranceD(tolerance, disentanglingZ)) { + ApproximatelyMultiplexPauli(tolerance, disentanglingZ, PauliZ, control, target); + } + if (_AnyOutsideToleranceD(tolerance, disentanglingY)) { + ApproximatelyMultiplexPauli(tolerance, disentanglingY, PauliY, control, target); + } // target is now in |0> state up to the phase given by arg of newCoefficients. - + // Continue recursion while there are control qubits. - if (Length(control!) == 0) - { + if (Length(control!) == 0) { let (abs, arg) = newCoefficients[0]!; - Exp([PauliI], -1.0 * arg, [target]); - } - else - { - let newControl = LittleEndian((control!)[1 .. Length(control!) - 1]); - let newTarget = (control!)[0]; - _PrepareArbitraryState(newCoefficients, newControl, newTarget); + if (AbsD(arg) > tolerance) { + Exp([PauliI], -1.0 * arg, [target]); + } + } else { + if (_AnyOutsideToleranceCP(tolerance, newCoefficients)) { + let newControl = LittleEndian((control!)[1 .. Length(control!) - 1]); + let newTarget = (control!)[0]; + _ApproximatelyPrepareArbitraryState(tolerance,newCoefficients, newControl, newTarget); + } } } - - + + /// # Summary - /// Computes the Bloch sphere coordinates for a single-qubit state. - /// + /// Computes the Bloch sphere coordinates for a single-qubit state. + /// /// Given two complex numbers $a0, a1$ that represent the qubit state, computes coordinates /// on the Bloch sphere such that /// $a0 \ket{0} + a1 \ket{1} = r e^{it}(e^{-i \phi /2}\cos{(\theta/2)}\ket{0}+e^{i \phi /2}\sin{(\theta/2)}\ket{1})$. @@ -204,8 +211,7 @@ namespace Microsoft.Quantum.Preparation { /// /// # Output /// A tuple containing `(ComplexPolar(r, t), phi, theta)`. - function BlochSphereCoordinates (a0 : ComplexPolar, a1 : ComplexPolar) : (ComplexPolar, Double, Double) - { + function BlochSphereCoordinates (a0 : ComplexPolar, a1 : ComplexPolar) : (ComplexPolar, Double, Double) { let abs0 = AbsComplexPolar(a0); let abs1 = AbsComplexPolar(a1); let arg0 = ArgComplexPolar(a0); From f7be904516686d1fe0d87ac86cb8fcdaf9398a07 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Tue, 17 Dec 2019 13:48:26 -0800 Subject: [PATCH 09/43] Update QML feature branch with latest master. (#191) * Clarify the restriction on the number of bits for IntAsBoolArray (#171) * Clarify the restriction on the number of bits for IntAsBoolArray This should fix #166 by providing a more specific error message. * Update Standard/src/Convert/Convert.qs Co-Authored-By: Chris Granade * Allow to have bits = 0 Looks like our tests assume that number = 0 with bits = 0 is a valid scenario; updating the change to account for that * Package updates (#188) * Updated sourcelink on QML to match new version. 
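To make the clarified contract concrete, a short sketch of `IntAsBoolArray` under the documented restrictions follows; the values are illustrative, with `bits` required to lie between 0 and 63, `number` between 0 and $2^{\texttt{bits}} - 1$, and `bits = 0` now accepted when `number = 0`.

```qsharp
// Little-endian bit decompositions under the clarified contract (illustrative values).
operation ShowIntAsBoolArrayExamples() : Unit {
    // 5 = 0b101, reported least-significant bit first:
    Message($"{IntAsBoolArray(5, 4)}");   // [true, false, true, false]
    // bits = 0 is allowed, provided number = 0:
    Message($"{IntAsBoolArray(0, 0)}");   // []
    // IntAsBoolArray(16, 4) would fail the range check, since 16 >= 2^4.
}
```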
--- Chemistry/src/DataModel/DataModel.csproj | 6 +++--- Chemistry/src/Jupyter/Jupyter.csproj | 6 +++--- Chemistry/tests/ChemistryTests/QSharpTests.csproj | 9 ++++++--- Chemistry/tests/DataModelTests/CSharpTests.csproj | 11 +++++++---- Chemistry/tests/JupyterTests/JupyterTests.csproj | 9 ++++++--- Chemistry/tests/SamplesTests/SamplesTests.csproj | 11 +++++++---- .../SerializationTests/SerializationTests.csproj | 11 +++++++---- Chemistry/tests/SystemTests/SystemTests.csproj | 9 ++++++--- MachineLearning/src/DataModel/DataModel.csproj | 2 +- Numerics/src/Numerics.csproj | 2 +- Numerics/tests/NumericsTests.csproj | 9 ++++++--- Standard/src/Convert/Convert.qs | 4 +++- Standard/src/Standard.csproj | 5 +---- Standard/tests/Standard.Tests.csproj | 6 +++--- 14 files changed, 60 insertions(+), 40 deletions(-) diff --git a/Chemistry/src/DataModel/DataModel.csproj b/Chemistry/src/DataModel/DataModel.csproj index a0f7a2db3ab..ef1a835a77e 100644 --- a/Chemistry/src/DataModel/DataModel.csproj +++ b/Chemistry/src/DataModel/DataModel.csproj @@ -36,8 +36,8 @@ - - + + @@ -53,6 +53,6 @@ - + diff --git a/Chemistry/src/Jupyter/Jupyter.csproj b/Chemistry/src/Jupyter/Jupyter.csproj index 5993652baef..5a480fa6b97 100644 --- a/Chemistry/src/Jupyter/Jupyter.csproj +++ b/Chemistry/src/Jupyter/Jupyter.csproj @@ -27,8 +27,8 @@ - - + + @@ -38,6 +38,6 @@ - + diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj index e3ccd565594..94bfd34ffc9 100644 --- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj +++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj @@ -10,11 +10,14 @@ - + - - + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/Chemistry/tests/DataModelTests/CSharpTests.csproj b/Chemistry/tests/DataModelTests/CSharpTests.csproj index c3243e55cba..59db46d6537 100644 --- a/Chemistry/tests/DataModelTests/CSharpTests.csproj +++ b/Chemistry/tests/DataModelTests/CSharpTests.csproj @@ -23,12 +23,15 @@ - + - - - + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/Chemistry/tests/JupyterTests/JupyterTests.csproj b/Chemistry/tests/JupyterTests/JupyterTests.csproj index 4b322d832da..bb21b47bf52 100644 --- a/Chemistry/tests/JupyterTests/JupyterTests.csproj +++ b/Chemistry/tests/JupyterTests/JupyterTests.csproj @@ -17,10 +17,13 @@ - + - - + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/Chemistry/tests/SamplesTests/SamplesTests.csproj b/Chemistry/tests/SamplesTests/SamplesTests.csproj index ecf44796b07..410bb53582c 100644 --- a/Chemistry/tests/SamplesTests/SamplesTests.csproj +++ b/Chemistry/tests/SamplesTests/SamplesTests.csproj @@ -17,12 +17,15 @@ - + - - - + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/Chemistry/tests/SerializationTests/SerializationTests.csproj b/Chemistry/tests/SerializationTests/SerializationTests.csproj index daab1c45c20..ff8ea481ff9 100644 --- a/Chemistry/tests/SerializationTests/SerializationTests.csproj +++ b/Chemistry/tests/SerializationTests/SerializationTests.csproj @@ -16,10 +16,13 @@ - - - - + + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj index 638b7bfa7cd..071091e0595 100644 --- a/Chemistry/tests/SystemTests/SystemTests.csproj +++ b/Chemistry/tests/SystemTests/SystemTests.csproj @@ -17,11 +17,14 @@ - + - - + + + all + 
runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/MachineLearning/src/DataModel/DataModel.csproj b/MachineLearning/src/DataModel/DataModel.csproj index 24297486e72..0b9c2b15adb 100644 --- a/MachineLearning/src/DataModel/DataModel.csproj +++ b/MachineLearning/src/DataModel/DataModel.csproj @@ -42,6 +42,6 @@ - + diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj index 61d5bf1c417..e385cfc0376 100644 --- a/Numerics/src/Numerics.csproj +++ b/Numerics/src/Numerics.csproj @@ -34,6 +34,6 @@ - + diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj index 4a7b29edfa4..a8ba4731346 100644 --- a/Numerics/tests/NumericsTests.csproj +++ b/Numerics/tests/NumericsTests.csproj @@ -17,9 +17,12 @@ - - - + + + + all + runtime; build; native; contentfiles; analyzers; buildtransitive + diff --git a/Standard/src/Convert/Convert.qs b/Standard/src/Convert/Convert.qs index b69b24bd51d..85a89b88367 100644 --- a/Standard/src/Convert/Convert.qs +++ b/Standard/src/Convert/Convert.qs @@ -76,8 +76,10 @@ namespace Microsoft.Quantum.Convert { /// An array of boolean values representing `number`. /// /// # Remarks - /// The input `number` must be at most $2^{\texttt{bits}} - 1$. + /// The input `bits` must be between 0 and 63. + /// The input `number` must be between 0 and $2^{\texttt{bits}} - 1$. function IntAsBoolArray(number : Int, bits : Int) : Bool[] { + Fact(bits >= 0 and bits <= 63, $"`bits` must be between 0 and 63 {2^bits}"); EqualityFactB(number >= 0 and number < 2 ^ bits, true, $"`number` must be between 0 and 2^`bits` - 1"); mutable outputBits = new Bool[bits]; mutable tempInt = number; diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj index ad5987d5a62..27ba8c776f0 100644 --- a/Standard/src/Standard.csproj +++ b/Standard/src/Standard.csproj @@ -31,9 +31,6 @@ - - - - + diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj index c05bed8b8ca..dc037e959cf 100644 --- a/Standard/tests/Standard.Tests.csproj +++ b/Standard/tests/Standard.Tests.csproj @@ -22,9 +22,9 @@ - - - + + + From 38120f20de54dbf53b71d98e21a064de7148b151 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Thu, 2 Jan 2020 12:36:35 -0800 Subject: [PATCH 10/43] Use new UDTs to consolidate and simplify training / validation API. (#187) * Begin simplifying training API. * Fix for TrainQcccSequential. * Simplify OneStochasticTrainingEpoch. * Simplified training API a little more. * Remove unused operation. * Started Chunks function. * Began removing ExtractMiniBatch. * Simplify training API with new SequentialModel UDT. * Further consolidate APIs. * Consolidated training and classification APIs further. * Fixed bug in Chunks. * Regression test for prev commit. * Make internal implementation of sequential model trainer private. * Make APIs more uniform. * Added new array function to simplify Misclassifications. * Remove ExtractMiniBatch. * Removed deprecated operations. * Fix type in Examples. * More progress removing private fns and ops from public API. * Further consolidated API surface. * Addressing feedback. 
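The commit above only names the new `Chunks` helper used when forming minibatches, so the following is a rough sketch of the kind of partitioning involved; the name `ChunksSketch`, the signature, and the handling of the short final chunk are assumptions for illustration rather than the library's actual implementation.

```qsharp
// Rough illustration of splitting an array into chunks of at most nElements items;
// the final chunk may be shorter. Sketch only; the library's Chunks function may
// differ in signature and details. Assumes nElements > 0.
function ChunksSketch<'T>(nElements : Int, arr : 'T[]) : 'T[][] {
    let nChunks = (Length(arr) + nElements - 1) / nElements;
    mutable output = new 'T[][nChunks];
    for (idxChunk in 0 .. nChunks - 1) {
        let first = idxChunk * nElements;
        let last = MinI(first + nElements, Length(arr)) - 1;
        set output w/= idxChunk <- arr[first .. last];
    }
    return output;
}
```

A helper of this kind presumably replaces the removed `ExtractMiniBatch` when the training loop partitions the shuffled sample indices into minibatches.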
--- MachineLearning/src/DataModel/Interop.cs | 91 +--- MachineLearning/src/Runtime/Circuits.qs | 386 ---------------- MachineLearning/src/Runtime/Classification.qs | 216 ++++----- MachineLearning/src/Runtime/Deprecated.qs | 84 ---- MachineLearning/src/Runtime/Examples.qs | 174 ++++--- .../src/Runtime/GradientEstimation.qs | 122 +++++ MachineLearning/src/Runtime/InputEncoding.qs | 29 +- MachineLearning/src/Runtime/Training.qs | 427 ++++++++---------- MachineLearning/src/Runtime/Types.qs | 19 + MachineLearning/src/Runtime/Utils.qs | 5 + MachineLearning/src/Runtime/Validation.qs | 71 ++- Standard/src/Arrays/Arrays.qs | 27 ++ Standard/src/Arrays/Filter.qs | 27 ++ Standard/tests/ArrayTests.qs | 30 +- 14 files changed, 633 insertions(+), 1075 deletions(-) delete mode 100644 MachineLearning/src/Runtime/Circuits.qs delete mode 100644 MachineLearning/src/Runtime/Deprecated.qs create mode 100644 MachineLearning/src/Runtime/GradientEstimation.qs diff --git a/MachineLearning/src/DataModel/Interop.cs b/MachineLearning/src/DataModel/Interop.cs index 262b17365b2..a59d812d233 100644 --- a/MachineLearning/src/DataModel/Interop.cs +++ b/MachineLearning/src/DataModel/Interop.cs @@ -176,8 +176,8 @@ public static List PartialLocalLayer(long[] indices, char pauli) /// /// Creates a cyclic block of nQubits controlled rotations that starts - /// with contol qubit (nQubits-1), target qubit (cspan-1) % n , followed by the - /// ladder of entanglers with control qubit iq and target qubit (iq+cspan) % n + /// with control qubit (nQubits-1), target qubit (cspan-1) % nQubits , followed by a + /// ladder of entanglers with control qubits iq and target qubit (iq+cspan) % nQubits /// /// Number of qubits to entangle /// @@ -220,68 +220,6 @@ public static void reindex(List struc) } } - public void QcccTrainSequential(IQArray> parameterSource, IQArray> trainingSet, IQArray trainingLabels, IQArray> trainingSchedule, - IQArray> validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - var sim = new QuantumSimulator(false, randomizationSeed); - (this._cachedParameters, this._bias) = - TrainQcccSequential.Run(sim, this._nQubits, this._structure, parameterSource, trainingSet, trainingLabels, trainingSchedule, validationSchedule, learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements).Result; - } - public void QcccTrainSequential(List parameterSource, List trainingSet, List trainingLabels, List trainingSchedule, - List validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - QcccTrainSequential(Qonvert.ToQ(parameterSource), Qonvert.ToQ(trainingSet), Qonvert.ToQ(trainingLabels), Qonvert.ToQ(trainingSchedule), - Qonvert.ToQ(validationSchedule), learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements, randomizationSeed); - } - - public void QcccTrainParallel(IQArray> parameterSource, IQArray> trainingSet, IQArray trainingLabels, IQArray> trainingSchedule, - IQArray> validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - var simAll = new List(parameterSource.Count); - var resultsAll = new List<(IQArray, double)>(parameterSource.Count); - var parameterComb = new List>>(parameterSource.Count); - - var indices = new int[parameterSource.Count]; - for (int j = 0; j < parameterSource.Count; j++) - { - indices[j] = j; - simAll.Add(new QuantumSimulator(false, 
randomizationSeed)); - resultsAll.Add((new QArray(),0.0)); - parameterComb.Add(new QArray>(new IQArray[] { parameterSource[j] })); //Isolating parameter starts - one per thread - } - Parallel.ForEach(indices, - (j) => - { - - var rslt = - TrainQcccSequential.Run(simAll[j], this._nQubits, this._structure, parameterComb[j], trainingSet, trainingLabels, trainingSchedule, validationSchedule, learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements).Result; - resultsAll[j] = rslt; - } - ); - //Estimated parameters and biases for each proposed parameter start. Now postprocess - long bestValidation = long.MaxValue; - int bestJ = -1; - var sim = new QuantumSimulator(false, randomizationSeed); - for (int j = 0; j < parameterSource.Count; j++) - { - var (pars, bias) = resultsAll[j]; - long misses = CountValidationMisses.Run(sim, tolerance, this._nQubits, trainingSet, trainingLabels, validationSchedule, this._structure, pars, bias, nMeasurements).Result; - if (bestValidation > misses) - { - bestValidation = misses; - bestJ = j; - } - } - (this._cachedParameters, this._bias) = resultsAll[bestJ]; - } //QcccTrainParallel - - public void QcccTrainParallel(List parameterSource, List trainingSet, List trainingLabels, List trainingSchedule, - List validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - QcccTrainParallel(Qonvert.ToQ(parameterSource), Qonvert.ToQ(trainingSet), Qonvert.ToQ(trainingLabels), Qonvert.ToQ(trainingSchedule), - Qonvert.ToQ(validationSchedule), learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements, randomizationSeed); - } - public long CountMisclassifications(double tolerance, IQArray> samples, IQArray knownLabels, IQArray> validationSchedule, long nMeasurements, uint randomizationSeed) { if (this.isTrained) @@ -304,31 +242,6 @@ public long CountMisclassifications(double tolerance, List samples, Li return CountMisclassifications(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(knownLabels), Qonvert.ToQ(validationSchedule), nMeasurements, randomizationSeed); } - //EstimateClassificationProbabilitiesClassicalDataAdapter(samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] - public double[] EstimateClassificationProbabilities(double tolerance, IQArray> samples, IQArray> schedule, long nMeasurements, uint randomizationSeed) - { - if (this.isTrained) - { - var sim = new QuantumSimulator(false, randomizationSeed); - IQArray probs = EstimateClassificationProbabilitiesClassicalDataAdapter.Run(sim, tolerance, samples, schedule, this._nQubits, this._structure, this.CachedParameters, nMeasurements).Result; - return probs.ToArray(); - } - return new double[] { -1.0 }; - } - - public double[] EstimateClassificationProbabilities(double tolerance, List samples, List schedule, long nMeasurements, uint randomizationSeed) - { - return EstimateClassificationProbabilities(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(schedule), nMeasurements, randomizationSeed); - } - - public double[] EstimateClassificationProbabilities(double tolerance, List samples, long nMeasurements, uint randomizationSeed) - { - List sched = new List(1); - sched.Add(new long[] { 0L, 1L, (long)(samples.Count - 1) }); - return EstimateClassificationProbabilities(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(sched), nMeasurements, randomizationSeed); - } - - } //class ClassificationModel } diff --git a/MachineLearning/src/Runtime/Circuits.qs 
b/MachineLearning/src/Runtime/Circuits.qs deleted file mode 100644 index 49df95cd18e..00000000000 --- a/MachineLearning/src/Runtime/Circuits.qs +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Diagnostics; - open Microsoft.Quantum.Preparation; - open Microsoft.Quantum.Characterization; - - /// WARNING: the downstream EstimateFrequencyA counts the frequency of Zero - - operation measureLastQubit(nQubits : Int): (Qubit[] => Result) { - let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; - return Measure(paulis, _); - } - - operation _endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, reg: Qubit[]): Unit is Adj - { - enc(LittleEndian(reg)); - _ApplyGates(parameters, gates, reg); - } - - operation endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence) : (Qubit[] => Unit is Adj) - { - return _endToEndPreparation(enc,parameters, gates, _); - } - - function collectNegativeLocs(cNegative: Int, coefficients : ComplexPolar[]) : Int[] - { - mutable negLocs = ConstantArray(cNegative, -1); - mutable nlx = 0; - for (idx in 0 .. Length(coefficients) - 1) - { - let (r,a) = (coefficients[idx])!; - if (AbsD(a - PI()) < 1E-9) { - if (nlx < cNegative) - { - set negLocs w/= nlx <- idx; - set nlx = nlx+1; - } - } - } - return negLocs; - } //collectNegativeLocs - - // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. - operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { - let L = Length(reg) - 1; - let g1 = _ApplyGates(param1,gates1,_); - let g2 = _ApplyGates(param2,gates2,_); - - enc(LittleEndian(reg[0..(L-1)])); - within { - H(Tail(reg)); - } apply { - (Controlled g1) ([reg[L]], reg[0..(L-1)]); - within { - X(Tail(reg)); - } apply { - (Controlled g2) ([reg[L]], reg[0..(L-1)]); - (Controlled Z) ([reg[L]], reg[(L-1)]); - } - } - } - - operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { - return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); - } - - operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double - { - return 1.0-EstimateFrequencyA(endToEndHTcircuit(enc2,param1,gates1,param2,gates2),measureLastQubit(nQubits), nQubits, nMeasurements); - } - - - - /// # Summary - /// polymorphic classical/quantum gradient estimator - /// - /// # Input - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. 
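For reference, the quantity estimated by the Hadamard-test construction above (`_endToEndHTcircuit` together with `HardamardTestPhysical`) is the standard one: if the auxiliary qubit is prepared in $\ket{+}$, one branch applies $U$ and the other applies $V$ to the encoded state $\ket{\phi}$, and the auxiliary qubit is rotated back to the $X$ basis before measurement, then

$$
\begin{align}
    \Pr(\text{auxiliary} = 0)
    = \frac{1}{4} \left\| U\ket{\phi} + V\ket{\phi} \right\|^2
    = \frac{1}{2} \left(1 + \operatorname{Re} \bra{\phi} V^\dagger U \ket{\phi} \right),
\end{align}
$$

so the measured frequency determines $\operatorname{Re}\bra{\phi} V^\dagger U \ket{\phi}$, which is exactly the real part required by the gradient expression sketched in the synopsis inside `EstimateGradient` below, up to the sign and indexing conventions used in that code.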
- /// IMPORTANT: measCount==0 implies simulator deployment - /// - /// # Output - /// the gradient - /// - operation EstimateGradient(param : Double[], gates: GateSequence, sg: StateGenerator, nMeasurements : Int) : (Double[]) { - //Synopsis: Suppose (param,gates) define Circ0 - //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 - //The expectation derivative is then 2 Re[] = - // Re[] - Re[] - //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 - //Thus we are left to compute Re[] = - // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> - //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) - - - //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) - //and we want a unitary description of its \theta-derivative. It can be written as - // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} - let pC = Length(param); - mutable grad = ConstantArray(pC, 0.0); - mutable paramShift = param + [0.0]; - let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); - - for (gate in gates!) { - set paramShift w/= gate::Index <- (param[gate::Index] + PI()); //Shift the corresponding parameter - // NB: This the *antiderivative* of the bracket - let newDer = 2.0 * HardamardTestPhysical( - sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements - ) - 1.0; - if (IsEmpty(gate::Span::ControlIndices)) { - //uncontrolled gate - set grad w/= gate::Index <- grad[gate::Index] + newDer; - } else { - //controlled gate - set paramShift w/=gate::Index<-(param[gate::Index]+3.0 * PI()); - //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) - // NB: This the *antiderivative* of the bracket - let newDer1 = 2.0 * HardamardTestPhysical( - sg::Apply, param, gates, paramShift, gates, nQubits + 1, - nMeasurements - ) - 1.0; - set grad w/= gate::Index <- (grad[gate::Index] + 0.5* (newDer - newDer1)); - set paramShift w/= gate::Index <-( param[gate::Index] + PI()); //unshift by 2 Pi (for debugging purposes) - } - set paramShift w/= gate::Index <- param[gate::Index]; //unshift this parameter - } - return grad; - - } //GradientHack - - - /// # Summary - /// computes stochastic gradient on one classical sample - /// - /// # Input - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## sample - /// sample vector as a raw array - /// - /// ## nMeasurements - /// number of true quantum measurements to estimate probabilities - /// - /// # Output - /// the gradient - /// - operation EstimateGradientFromClassicalSample(tolerance: Double, param : Double[], gates: GateSequence, sample: Double[], nMeasurements : Int) : (Double[]) { - let nQubits = MaxI(FeatureRegisterSize(sample), NQubitsRequired(gates)); - let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); - let sg = StateGenerator(nQubits, circEnc); - return EstimateGradient(param, gates, sg, nMeasurements); - } - - //Csharp-frendly adapter for gradient estimation - //'gates' is a array of "flattened" controlled rotation defitions - //each such definition is Int[no.controls+3] in the format [parameter index, Pauli index, target index <,control qubit indices>] - //Pauli index is: 0 for I, 1 for X, 2 for y, 3 for Z - //target index is the index of the target qubit of the rotation - //Sequence of can be empty for uncontroled - operation 
GradientClassicalSimulationAdapter(tolerance: Double, param : Double[], gates: Int[][], sample: Double[]) : (Double[]) - { - - return EstimateGradientFromClassicalSample(tolerance, param,unFlattenGateSequence(gates),sample,0); - - } - - /// # Summary - /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. THIS operation is IN DEPRECATION - /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## param - /// parameters of the circuits - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## nMeasurements - /// the maximum number of quantum measurements used in the probability estimation - /// - /// # Output - /// TODO - operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, nMeasurements: Int): - (Double,Int)[] { - mutable N = IsEmpty(samples) - ? NQubitsRequired(gates) - | MaxI(NQubitsRequired(gates), FeatureRegisterSize(_Features(Head(samples)))); - mutable ret = new (Double, Int)[0]; - for (rg in sched!) { - for (ix in rg) { - let sample = samples[ix]; - //agnostic w.r.t. simulator (may still be simulable) - let prob1 = EstimateClassificationProbabilityFromSample(1E-12, param, gates, sample::Features, nMeasurements); - set ret += [(prob1, sample::Label)]; - } - } - - return ret; - } - - operation EstimateClassificationProbabilitiesClassicalDataAdapter(tolerance: Double, samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] - { - return EstimateClassificationProbabilitiesClassicalData(tolerance, samples, unFlattenSchedule(schedule), nQubits, unFlattenGateSequence(gates), param, measCount); - } - - - /// # Summary - /// generate a flat list of sample indices where mispredictions occur - /// - /// # Input - /// ## sched - /// a sampling schedule - /// - /// ## pls - /// a list of estimated probabilities with the corresponding class labels - /// - /// ## bias - /// bias on record - /// - /// # Output - /// the list of indices where mispredictions occur - /// - function MissLocations(sched : SamplingSchedule, pls : (Double, Int)[], bias: Double) : Int[] { - mutable ret = new Int[0]; - mutable ir = 0; - - for (rg in sched!) 
{ - for (ix in rg) { - let (prob1, lab) = pls[ir]; - set ir += 1; - if (prob1 + bias > 0.5) { - if (lab < 1) { - set ret += [ix]; - } - } else { - if (lab > 0) { - set ret += [ix]; - } - } - } - } - return ret; - } - - /// # Summary - /// C#-friendly adapter to misclassification tally - /// - /// # Input - /// ## vectors - /// data vectors in flat encoding - /// - /// ## labels - /// array of corresponding class lables - /// - /// ## schedule - /// flat representation of index subset on which the circuit is scored - /// - /// ## param - /// circuit parameters - /// - /// ## gateStructure - /// gate structure in flat representation - /// - /// ## bias - /// prediction bias to be tested - /// - /// ## measCount - /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) - /// - /// # Output - /// the number of misclassifications - /// - operation MisclassificationScoreAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int { - mutable misses = 0; - let samples = unFlattenLabeledSamples(vectors,labels); - let gates = unFlattenGateSequence(gateStructure); - let sched = unFlattenSchedule(schedule); - - let pls = ClassificationProbabilitiesClassicalData(samples,sched,param,gates,measCount); - let biasCurrent = _UpdatedBias(pls, bias, 0.01); - let (h1,m1) = TallyHitsMisses(pls,biasCurrent); - return m1; - } - - /// # Summary - /// Extract a mini batch of samples and wrap the batch as a LabeledSampleContainer - /// - /// # Input - /// ## size - /// desired number of samples in the mini batch - /// - /// ## ixLoc - /// starting index for the batch in the list of locations - /// - /// ## locations - /// list of indices of samples of interest - /// - /// ## samples - /// the container to extract the samples from - /// - /// # Output - /// the mini batched wrapped as a LabeledSampleContainer - /// - /// # Remarks - /// the resulting mini batch can be occasionally shorter than the requested 'size' - /// (when it falls on the tail end of the list of 'locations') - /// - function ExtractMiniBatch(size: Int, ixLoc: Int, locations: Int[], samples: LabeledSample[]): LabeledSample[] { - mutable cnt = Length(locations)-ixLoc; - if (cnt > size) - { - set cnt = size; - } - mutable rgSamples = new LabeledSample[0]; - if (cnt > 0) - { - set rgSamples = new LabeledSample[cnt]; - for (isa in 0..(cnt-1)) - { - set rgSamples w/=isa<- samples[locations[ixLoc+isa]]; - } - } - return rgSamples; - } - - /// # Summary - /// (Randomly) inflate of deflate the source number - operation randomize(src : Double, relativeFuzz : Double) : Double { - return src * ( - 1.0 + relativeFuzz * (Random([0.5, 0.5]) > 0 ? 
1.0 | -1.0) - ); - } - - - - /// Summary - /// One possible C#-friendly wrap around the StochasticTrainingLoop - /// - operation StochasticTrainingLoopPlainAdapter(vectors: Double[][], labels: Int[], sched: Int[][], schedScore: Int[][], periodScore: Int, - miniBatchSize: Int, param: Double[],gates: Int[][], bias: Double, lrate: Double, maxEpochs: Int, tol: Double, measCount: Int ) : Double[] // - { - let samples = unFlattenLabeledSamples(vectors,labels); - let sch = unFlattenSchedule(sched); - let schScore = unFlattenSchedule(sched); - let gts = unFlattenGateSequence(gates); - let ((h,m),(b,parpar)) = StochasticTrainingLoop(samples, sch, schScore, periodScore, - miniBatchSize, param, gts, bias, lrate, maxEpochs, tol, measCount); - mutable ret = new Double[Length(parpar)+3]; - set ret w/=0<-IntAsDouble (h); - set ret w/=1<-IntAsDouble (m); - set ret w/=2<-b; - for (j in 0..(Length(parpar)-1)) - { - set ret w/=(j+3)<-parpar[j]; - } - return ret; - } - - -} diff --git a/MachineLearning/src/Runtime/Classification.qs b/MachineLearning/src/Runtime/Classification.qs index 65c3e67293e..aa3d9fae7e2 100644 --- a/MachineLearning/src/Runtime/Classification.qs +++ b/MachineLearning/src/Runtime/Classification.qs @@ -5,146 +5,90 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Convert; - operation EstimateClassificationProbabilityFromEncodedSample( - encodedSample : StateGenerator, - parameters: Double[], - gates: GateSequence, nMeasurements : Int - ) - : Double { - return 1.0 - EstimateFrequencyA( - endToEndPreparation(encodedSample::Apply, parameters,gates), - measureLastQubit(encodedSample::NQubits), - encodedSample::NQubits, - nMeasurements - ); - } + + operation _PrepareClassification( + encoder : (LittleEndian => Unit is Adj + Ctl), + parameters : Double[], + gates : GateSequence, + target : Qubit[] + ) + : Unit is Adj { + encoder(LittleEndian(target)); + _ApplyGates(parameters, gates, target); + } - operation EstimateClassificationProbabilityFromSample(tolerance: Double, parameters : Double[], gates: GateSequence, sample: Double[], nMeasurements: Int) - : Double { - let nQubits = FeatureRegisterSize(sample); - let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); - return EstimateClassificationProbabilityFromEncodedSample( - StateGenerator(nQubits, circEnc), parameters, gates, nMeasurements - ); + operation EstimateClassificationProbability( + tolerance: Double, + parameters : Double[], + gates: GateSequence, + sample: Double[], + nMeasurements: Int + ) + : Double { + let nQubits = FeatureRegisterSize(sample); + let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let encodedSample = StateGenerator(nQubits, circEnc); + return 1.0 - EstimateFrequencyA( + _PrepareClassification(encodedSample::Apply, parameters, gates, _), + _TailMeasurement(encodedSample::NQubits), + encodedSample::NQubits, + nMeasurements + ); + } - } + operation EstimateClassificationProbabilities( + tolerance : Double, + parameters : Double[], + structure : GateSequence, + samples : Double[][], + nMeasurements : Int + ) + : Double[] { + let effectiveTolerance = tolerance / IntAsDouble(Length(structure!)); + return ForEach( + EstimateClassificationProbability( + effectiveTolerance, parameters, structure, _, nMeasurements + ), + samples + ); + } - /// # Summary - /// Given a of classification probability and a bias, 
returns the - /// label inferred from that probability. - /// - /// # Input - /// ## bias - /// The bias between two classes, typically the result of training a - /// classifier. - /// ## probability - /// A classification probabilities for a particular sample, typicaly - /// resulting from estimating its classification frequency. - /// - /// # Output - /// The label inferred from the given classification probability. - function InferredLabel(bias : Double, probability : Double) : Int { - return probability + bias > 0.5 ? 1 | 0; - } + /// # Summary + /// Given a of classification probability and a bias, returns the + /// label inferred from that probability. + /// + /// # Input + /// ## bias + /// The bias between two classes, typically the result of training a + /// classifier. + /// ## probability + /// A classification probabilities for a particular sample, typicaly + /// resulting from estimating its classification frequency. + /// + /// # Output + /// The label inferred from the given classification probability. + function InferredLabel(bias : Double, probability : Double) : Int { + return probability + bias > 0.5 ? 1 | 0; + } - /// # Summary - /// Given an array of classification probabilities and a bias, returns the - /// label inferred from each probability. - /// - /// # Input - /// ## bias - /// The bias between two classes, typically the result of training a - /// classifier. - /// ## probabilities - /// An array of classification probabilities for a set of samples, typicaly - /// resulting from estimating classification frequencies. - /// - /// # Output - /// The label inferred from each classification probability. - function InferredLabels(bias : Double, probabilities : Double[]): Int[] { - return Mapped(InferredLabel(bias, _), probabilities); - } - - /// # Summary - /// Estimates all classification probabilities for a given dataset. - /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## nQubits - /// number of qubits in the classification circuit - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## param - /// parameters of the circuits - /// - /// ## measCount - /// - /// # Output - /// array of corresponding estimated probabilities of the top class label - /// - operation EstimateClassificationProbabilitiesClassicalData( - tolerance : Double, samples : Double[][], sched : SamplingSchedule, - nQubits : Int, gates : GateSequence, param : Double[], - nMeasurements : Int - ) : Double[] { - let effectiveTolerance = tolerance / IntAsDouble(Length(gates!)); - mutable ret = new Double[0]; - for (rg in sched!) { - for (ix in rg) { - let samp = samples[ix]; - set ret += [EstimateClassificationProbabilityFromEncodedSample( - StateGenerator(nQubits, NoisyInputEncoder(effectiveTolerance, samp)), - param, gates, nMeasurements - )]; - } - } - - return ret; - } - - /// # Summary - /// Using a flat description of a classification model, assign estimated probability of top class label - /// to each vector in the test set - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## gates - /// Flattened representation of classifier structure. 
Each element is - /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] - /// - /// ## parameters - /// an array of circuit parameters - /// - /// ## samples - /// the set of vectors to be labeled - /// - /// ## bias - /// top class bias - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// Array of predicted class labels for each sample of the test set - /// - operation DoClassification(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], bias: Double, samples : Double[][], nMeasurements: Int) : Int[] { - let schedule = SamplingSchedule([0..Length(samples) - 1]); - let sequence = unFlattenGateSequence(gates); - let probs = EstimateClassificationProbabilitiesClassicalData( - tolerance, samples, schedule, nQubits, sequence, parameters, nMeasurements - ); - return InferredLabels(bias, probs); - } + /// # Summary + /// Given an array of classification probabilities and a bias, returns the + /// label inferred from each probability. + /// + /// # Input + /// ## bias + /// The bias between two classes, typically the result of training a + /// classifier. + /// ## probabilities + /// An array of classification probabilities for a set of samples, typicaly + /// resulting from estimating classification frequencies. + /// + /// # Output + /// The label inferred from each classification probability. + function InferredLabels(bias : Double, probabilities : Double[]): Int[] { + return Mapped(InferredLabel(bias, _), probabilities); + } } diff --git a/MachineLearning/src/Runtime/Deprecated.qs b/MachineLearning/src/Runtime/Deprecated.qs deleted file mode 100644 index a27d203ba34..00000000000 --- a/MachineLearning/src/Runtime/Deprecated.qs +++ /dev/null @@ -1,84 +0,0 @@ -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Logical; - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Math; - - /// Sample container access method - @Deprecated("") - function getSample(samples: LabeledSampleContainer, ix: Int): LabeledSample { - return (samples!)[ix]; - } - - /// Access the raw data in a labeled sample - @Deprecated("") - function getData(samp: LabeledSample): Double[] { - return Fst(samp!); - } - - /// Access the label in a labeled sample - @Deprecated("") - function getLabel(samp:LabeledSample) : Int - { - return Snd(samp!); - } - - - /// Abstraction for a container of labeled samples - @Deprecated("") - newtype LabeledSampleContainer = LabeledSample[]; - - @Deprecated("Microsoft.Quantum.Diagnostics.DumpRegister") - function dumpRegisterToConsole ( qs: Qubit[]) : Unit - {} - //{DumpRegister((),qs);} //Swap for empty body when some dumping of registers is needed - - @Deprecated("Microsoft.Quantum.MachineLearning.NQubitsRequired") - function qubitSpan(seq : GateSequence) : Int { - return NQubitsRequired(seq); - } - - /// Set force a qubit into a desired basis state - @Deprecated("Microsoft.Quantum.Measurement.SetToBasisState") - operation Set (desired: Result, q1: Qubit) : Unit - { - //body - //{ - let current = M(q1); - if (desired != current) - { - X(q1); - } - //} - } - - @Deprecated("Microsoft.Quantum.Math.SquaredNorm") - function squareNorm(v:Double[]):Double - { - mutable ret = 0.0; - for (u in v) - { - set ret = ret + u*u; - } - return ret; - } - - @Deprecated("") // replace with ForEach. 
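    // A minimal sketch of the ForEach-based replacement suggested above (an illustration
    // only, assuming the `randomize` operation defined alongside this library; the name
    // `RandomizedArray` is hypothetical):
    //
    //     operation RandomizedArray(src : Double[], relativeFuzz : Double) : Double[] {
    //         return ForEach(randomize(_, relativeFuzz), src);
    //     }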
- operation randomizeArray(src:Double[], relativeFuzz: Double) : Double[] - { - mutable ret = new Double[Length(src)]; - for (ix in 0..(Length(src)-1)) - { - set ret w/=ix<-randomize(src[ix], relativeFuzz); - } - return ret; - } - - @Deprecated("Microsoft.Quantum.Math.NearlyEqualD") - function nearIdenticalDoubles(x:Double,y:Double):Bool { - return NearlyEqualD(x, y); //Note key tolerance constant here - } - - -} diff --git a/MachineLearning/src/Runtime/Examples.qs b/MachineLearning/src/Runtime/Examples.qs index 5c9d863f18d..b7c3ccd4129 100644 --- a/MachineLearning/src/Runtime/Examples.qs +++ b/MachineLearning/src/Runtime/Examples.qs @@ -1,96 +1,94 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Primitive; - open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Convert; open Microsoft.Quantum.Math; - operation IrisTrainingData() : LabeledSampleContainer { - let ret = - [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), - LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], - 1)), LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], - 0)), LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], - 0)), LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], - 0)), LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], - 0)), LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], - 1)), LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], - 0)), LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], - 1)), LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], - 1)), LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], - 0)), LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], - 0)), LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], - 0)), LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], - 0)), LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], - 0)), LabeledSample(([0.584546, 0.562276, 0.439516, 0.385976], - 1)), LabeledSample(([0.608485, 0.577022, 0.427781, 0.337336], - 1)), LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], - 1)), LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], - 1)), LabeledSample(([0.512997, 0.525043, 0.460839, 0.49879], - 0)), LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], - 0)), LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], - 0)), LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], - 1)), LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], - 1)), LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], - 0)), LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], - 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], - 0)), LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], - 1)), LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], - 0)), LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], - 0)), LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], - 0)), LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], - 1)), LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], - 1)), LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], - 1)), LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], - 1)), LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], - 1)), LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], - 1)), LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], - 0)), LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], - 0)), LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], - 0)), LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], - 0)), LabeledSample(([0.549086, 
0.561405, 0.474075, 0.398223], - 1)), LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], - 0)), LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], - 0)), LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], - 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], - 0)), LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], - 1)), LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], - 0)), LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], - 0)), LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], - 0)), LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], - 0)), LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], - 0)), LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], - 1)), LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], - 1)), LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], - 1)), LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], - 1)), LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], - 1)), LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], - 1)), LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], - 1)), LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], - 1)), LabeledSample(([0.552402, 0.574207, 0.444699, 0.409123], - 1)), LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], - 1)), LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], - 0)), LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], - 1)), LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], - 1)), LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], - 0)), LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], - 0)), LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], - 0)), LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], - 1)), LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], - 1)), LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], - 1)), LabeledSample(([0.510624, 0.60155, 0.43847, 0.430285], - 1)), LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], - 1)), LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], - 1)), LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], - 0)), LabeledSample(([0.530036, 0.577194, 0.452731, 0.425375], - 1)), LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], - 1)), LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], - 1)), LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], - 0)), LabeledSample(([0.552072, 0.545641, 0.487013, 0.400388], 1)) - ]; - return LabeledSampleContainer(ret); - } + operation IrisTrainingData() : LabeledSample[] { + return [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), + LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], + 1)), LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], + 0)), LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], + 0)), LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], + 0)), LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], + 0)), LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], + 1)), LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], + 0)), LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], + 1)), LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], + 1)), LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], + 0)), LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], + 0)), LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], + 0)), LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], + 0)), LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], + 0)), LabeledSample(([0.584546, 0.562276, 
0.439516, 0.385976], + 1)), LabeledSample(([0.608485, 0.577022, 0.427781, 0.337336], + 1)), LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], + 1)), LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], + 1)), LabeledSample(([0.512997, 0.525043, 0.460839, 0.49879], + 0)), LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], + 0)), LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], + 0)), LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], + 1)), LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], + 1)), LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], + 0)), LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], + 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], + 0)), LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], + 1)), LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], + 0)), LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], + 0)), LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], + 0)), LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], + 1)), LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], + 1)), LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], + 1)), LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], + 1)), LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], + 1)), LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], + 1)), LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], + 0)), LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], + 0)), LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], + 0)), LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], + 0)), LabeledSample(([0.549086, 0.561405, 0.474075, 0.398223], + 1)), LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], + 0)), LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], + 0)), LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], + 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], + 0)), LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], + 1)), LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], + 0)), LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], + 0)), LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], + 0)), LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], + 0)), LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], + 0)), LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], + 1)), LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], + 1)), LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], + 1)), LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], + 1)), LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], + 1)), LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], + 1)), LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], + 1)), LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], + 1)), LabeledSample(([0.552402, 0.574207, 0.444699, 0.409123], + 1)), LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], + 1)), LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], + 0)), LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], + 1)), LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], + 1)), LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], + 0)), LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], + 0)), LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], + 0)), LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], + 1)), LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], + 1)), LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], + 1)), LabeledSample(([0.510624, 
0.60155, 0.43847, 0.430285], + 1)), LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], + 1)), LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], + 1)), LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], + 0)), LabeledSample(([0.530036, 0.577194, 0.452731, 0.425375], + 1)), LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], + 1)), LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], + 1)), LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], + 0)), LabeledSample(([0.552072, 0.545641, 0.487013, 0.400388], 1)) + ]; + } - operation Examples () : Unit + operation Examples () : Unit { - + } } diff --git a/MachineLearning/src/Runtime/GradientEstimation.qs b/MachineLearning/src/Runtime/GradientEstimation.qs new file mode 100644 index 00000000000..92b340a6e19 --- /dev/null +++ b/MachineLearning/src/Runtime/GradientEstimation.qs @@ -0,0 +1,122 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Characterization; + + // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. + operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { + let L = Length(reg) - 1; + let g1 = _ApplyGates(param1, gates1, _); + let g2 = _ApplyGates(param2, gates2, _); + + enc(LittleEndian(reg[0..(L-1)])); + within { + H(Tail(reg)); + } apply { + (Controlled g1) ([reg[L]], reg[0..(L-1)]); + within { + X(Tail(reg)); + } apply { + (Controlled g2) ([reg[L]], reg[0..(L-1)]); + (Controlled Z) ([reg[L]], reg[(L-1)]); + } + } + } + + operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { + return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); + } + + operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double { + return 1.0 - EstimateFrequencyA( + endToEndHTcircuit(enc2,param1,gates1,param2,gates2), + _TailMeasurement(nQubits), + nQubits, + nMeasurements + ); + } + + + + /// # Summary + /// polymorphic classical/quantum gradient estimator + /// + /// # Input + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## measCount + /// number of true quantum measurements to estimate probabilities. 
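    /// (For reference: the derivation sketched in the body below relies, roughly, on the
    /// identities d/dθ ⟨ψ|U(θ)† Z U(θ)|ψ⟩ = 2 Re ⟨ψ|U(θ)† Z U'(θ)|ψ⟩ and, for unit
    /// vectors a and b, Re ⟨a|b⟩ = 1 − ‖a − b‖² / 2, together with the shift rule
    /// R'(θ) = R(θ + π) / 2 for rotations R(θ) = exp(−iθP/2).)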
+ /// IMPORTANT: measCount==0 implies simulator deployment + /// + /// # Output + /// the gradient + /// + operation EstimateGradient( + gates : GateSequence, + param : Double[], + sg : StateGenerator, + nMeasurements : Int + ) + : (Double[]) { + //Synopsis: Suppose (param,gates) define Circ0 + //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 + //The expectation derivative is then 2 Re[] = + // Re[] - Re[] + //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 + //Thus we are left to compute Re[] = + // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> + //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) + + + //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) + //and we want a unitary description of its \theta-derivative. It can be written as + // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} + mutable grad = ConstantArray(Length(param), 0.0); + let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); + + for (gate in gates!) { + let paramShift = (param + [0.0]) + // Shift the corresponding parameter. + w/ gate::Index <- (param[gate::Index] + PI()); + + // NB: This the *antiderivative* of the bracket + let newDer = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements + ) - 1.0; + if (IsEmpty(gate::Span::ControlIndices)) { + //uncontrolled gate + set grad w/= gate::Index <- grad[gate::Index] + newDer; + } else { + //controlled gate + let controlledShift = paramShift + w/ gate::Index <- (param[gate::Index] + 3.0 * PI()); + //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) + // NB: This the *antiderivative* of the bracket + let newDer1 = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, controlledShift, gates, nQubits + 1, + nMeasurements + ) - 1.0; + set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1)); + } + } + return grad; + + } + +} diff --git a/MachineLearning/src/Runtime/InputEncoding.qs b/MachineLearning/src/Runtime/InputEncoding.qs index 201b1d5e3be..730fa2d98e8 100644 --- a/MachineLearning/src/Runtime/InputEncoding.qs +++ b/MachineLearning/src/Runtime/InputEncoding.qs @@ -2,6 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Preparation; open Microsoft.Quantum.Convert; open Microsoft.Quantum.Math; @@ -35,19 +36,31 @@ namespace Microsoft.Quantum.MachineLearning { return ret; } + function _NegativeLocations(cNegative: Int, coefficients : ComplexPolar[]) : Int[] { + mutable negLocs = new Int[0]; + for ((idx, coefficient) in Enumerated(coefficients)) { + if (AbsD(coefficient::Argument - PI()) < 1E-9) { + set negLocs += [idx]; + } + } + return Length(negLocs) > cNegative ? negLocs[...cNegative - 1] | negLocs; + } + /// Do special processing on the first cNegative entries - operation _EncodeSparseNegativeInput(cNegative: Int, tolerance: Double,coefficients : ComplexPolar[], reg: LittleEndian): Unit is Adj + Ctl - { - let negLocs = collectNegativeLocs(cNegative, coefficients); + operation _EncodeSparseNegativeInput( + cNegative: Int, + tolerance: Double, + coefficients : ComplexPolar[], + reg: LittleEndian + ) + : Unit is Adj + Ctl { + let negLocs = _NegativeLocations(cNegative, coefficients); // Prepare the state disregarding the sign of negative components. 
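            // In other words: the call below loads only the magnitudes (the signs at `negLocs`
            // having been dropped by `_Unnegate`), and the `ReflectAboutInteger` loop that
            // follows restores each negative sign by applying a relative phase of -1 to the
            // corresponding basis state.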
ApproximatelyPrepareArbitraryState(tolerance, _Unnegate(negLocs, coefficients), reg); // Reflect about the negative coefficients to apply the negative signs // at the end. - for (ineg in 0..(cNegative - 1)) { - let jx = negLocs[ineg]; - if (jx > -1) { - ReflectAboutInteger(jx, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) - } + for (idxNegative in negLocs) { + ReflectAboutInteger(idxNegative, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) } } diff --git a/MachineLearning/src/Runtime/Training.qs b/MachineLearning/src/Runtime/Training.qs index a6072d499da..9acea0d7dc6 100644 --- a/MachineLearning/src/Runtime/Training.qs +++ b/MachineLearning/src/Runtime/Training.qs @@ -9,7 +9,7 @@ namespace Microsoft.Quantum.MachineLearning { function _MisclassificationRate(probabilities : Double[], labels : Int[], bias : Double) : Double { let proposedLabels = InferredLabels(bias, probabilities); - return IntAsDouble(NMismatches(proposedLabels, labels)) / IntAsDouble(Length(probabilities)); + return IntAsDouble(NMisclassifications(proposedLabels, labels)) / IntAsDouble(Length(probabilities)); } /// # Summary @@ -47,20 +47,14 @@ namespace Microsoft.Quantum.MachineLearning { } operation TrainSequentialClassifier( - nQubits: Int, gates: GateSequence, parameterSource: Double[][], samples: LabeledSample[], + options : TrainingOptions, trainingSchedule: SamplingSchedule, - validationSchedule: SamplingSchedule, - learningRate: Double, - tolerance: Double, - miniBatchSize: Int, - maxEpochs: Int, - nMeasurements: Int - ) : (Double[], Double) { - mutable retParam = [-1E12]; - mutable retBias = -2.0; //Indicates non-informative start + validationSchedule: SamplingSchedule + ) : SequentialModel { + mutable bestSoFar = SequentialModel([-1E12], -2.0); mutable bestValidation = Length(samples) + 1; let features = Mapped(_Features, samples); @@ -68,94 +62,34 @@ namespace Microsoft.Quantum.MachineLearning { for (idxStart in 0..(Length(parameterSource) - 1)) { Message($"Beginning training at start point #{idxStart}..."); - let ((h, m), (b, parpar)) = StochasticTrainingLoop( - samples, trainingSchedule, trainingSchedule, 1, miniBatchSize, - parameterSource[idxStart], gates, 0.0, learningRate, maxEpochs, - tolerance, nMeasurements + let proposedUpdate = TrainSequentialClassifierAtModel( + gates, SequentialModel(parameterSource[idxStart], 0.0), + samples, options, trainingSchedule, 1 ); - let probsValidation = EstimateClassificationProbabilitiesClassicalData( - tolerance, features, validationSchedule, nQubits, - gates, parpar, nMeasurements + let probabilities = EstimateClassificationProbabilities( + options::Tolerance, + proposedUpdate::Parameters, + gates, + Sampled(validationSchedule, features), + options::NMeasurements ); // Find the best bias for the new classification parameters. 
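            // Roughly speaking, _UpdatedBias picks a bias for the decision rule
            // "probability + bias > 0.5" (see InferredLabel) so that as few of the
            // validation pairs as possible end up misclassified.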
let localBias = _UpdatedBias( - Zip(probsValidation, Sampled(validationSchedule, labels)), + Zip(probabilities, Sampled(validationSchedule, labels)), 0.0, - tolerance + options::Tolerance ); - let localPL = InferredLabels(localBias, probsValidation); - let localMisses = NMismatches(localPL, Sampled(validationSchedule, labels)); + let localPL = InferredLabels(localBias, probabilities); + let localMisses = NMisclassifications(localPL, Sampled(validationSchedule, labels)); if (bestValidation > localMisses) { set bestValidation = localMisses; - set retParam = parpar; - set retBias = localBias; + set bestSoFar = proposedUpdate; } } - return (retParam, retBias); + return bestSoFar; } - /// # Summary - /// Using a flat description of a classification model, find a good local optimum - /// for the model parameters and a related calssification bias - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## gates - /// flat characterization of circuit structure. Each element is [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - /// - /// ## parameterSource - /// an array of parameter arrays, to be used as SGD starting points - /// - /// ## trainingSet - /// the set of training samples - /// - /// ## trainingLabels - /// the set of training labels - /// - /// ## trainingSchedule - /// defines a subset of training data actually used in the training process - /// - /// ## validatioSchedule - /// defines a subset of training data used for validation and computation of the *bias* - /// - /// ## learningRate - /// initial learning rate for stochastic gradient descent - /// - /// ## tolerance - /// sufficient absolute precision of parameter updates - /// - /// ## learningRate - /// initial learning rate for stochastic gradient descent - /// - /// ## miniBatchSize - /// maximum size of SGD mini batches - /// - /// ## maxEpochs - /// limit to the number of training epochs - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// (Array of optimal parameters, optimal validation *bias*) - /// - operation TrainQcccSequential(nQubits: Int, gates: Int[][], parameterSource: Double[][], trainingSet: Double[][], trainingLabels: Int[], trainingSchedule: Int[][], validationSchedule: Int[][], - learningRate: Double, tolerance: Double, miniBatchSize: Int, maxEpochs: Int, nMeasurements: Int) : (Double[],Double) { - let samples = unFlattenLabeledSamples(trainingSet,trainingLabels); - let sch = unFlattenSchedule(trainingSchedule); - let schValidate = unFlattenSchedule(validationSchedule); - let gateSequence = unFlattenGateSequence(gates); - - return TrainSequentialClassifier( - nQubits, gateSequence, parameterSource, samples, - sch, schValidate, learningRate, tolerance, miniBatchSize, - maxEpochs, nMeasurements - ); - } //TrainQcccSequential - /// # Summary /// attempts a single parameter update in the direction of mini batch gradient /// @@ -178,31 +112,40 @@ namespace Microsoft.Quantum.MachineLearning { /// # Output /// (utility, (new)parameters) pair /// - operation OneStochasticTrainingStep( - tolerance: Double, miniBatch: LabeledSample[], param: Double[], gates: GateSequence, - lrate: Double, measCount: Int - ) : (Double, Double[]) { - mutable upParam = new Double[Length(param)]; + operation _RunSingleTrainingStep( + miniBatch : LabeledSample[], + options : TrainingOptions, + param : Double[], gates : GateSequence + ) + : (Double, Double[]) { mutable batchGradient = 
ConstantArray(Length(param), 0.0); + let nQubits = MaxI(FeatureRegisterSize(miniBatch[0]::Features), NQubitsRequired(gates)); + let effectiveTolerance = options::Tolerance / IntAsDouble(Length(gates!)); - for (samp in miniBatch) { - mutable err = IntAsDouble(samp::Label); + for (sample in miniBatch) { + mutable err = IntAsDouble(sample::Label); if (err < 1.0) { set err = -1.0; //class 0 misclassified to class 1; strive to reduce the probability } - let grad = EstimateGradientFromClassicalSample(tolerance, param, gates, samp::Features, measCount); + let stateGenerator = StateGenerator( + nQubits, + NoisyInputEncoder(effectiveTolerance, sample::Features) + ); + let grad = EstimateGradient( + gates, param, stateGenerator, + options::NMeasurements + ); for (ip in 0..(Length(param) - 1)) { // GradientClassicalSample actually computes antigradient, but err*grad corrects it back to gradient - set batchGradient w/= ip <- (batchGradient[ip] + lrate * err * grad[ip]); + set batchGradient w/= ip <- (batchGradient[ip] + options::LearningRate * err * grad[ip]); } } - for (ip in 0..(Length(param)-1)) { - set upParam w/= ip <- (param[ip] + batchGradient[ip]); - } - return (SquaredNorm(batchGradient), upParam); //TODO:REVIEW: Ok to interpret utility as size of the overall move? - } + let updatedParameters = Mapped(PlusD, Zip(param, batchGradient)); + // TODO:REVIEW: Ok to interpret utility as size of the overall move? + return (SquaredNorm(batchGradient), updatedParameters); + } /// # Summary /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator @@ -238,92 +181,76 @@ namespace Microsoft.Quantum.MachineLearning { /// ## measCount /// number of true quantum measurements to estimate probabilities. /// - operation OneStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int, - h0: Int, m0: Int): ((Int,Int),(Double,Double[])) - { - let HARDCODEDunderage = 3; //4/26 slack greater than 3 is not recommended - - - mutable hBest = h0; - mutable mBest = m0; - mutable biasBest = bias; - - let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - let (h2,m2) = TallyHitsMisses(pls,biasBest); - let missLocations = MissLocations(schedScore, pls, biasBest); - - mutable paramBest = param; - mutable paramCurrent = paramBest; - mutable biasCurrent = biasBest; + operation _RunSingleTrainingEpoch( + samples: LabeledSample[], + schedule: SamplingSchedule, periodScore: Int, + options : TrainingOptions, + model : SequentialModel, gates: GateSequence, + nPreviousBestMisses : Int + ) + : (Int, SequentialModel) { + let HARDCODEDunderage = 3; // 4/26 slack greater than 3 is not recommended + + mutable nBestMisses = nPreviousBestMisses; + mutable bestSoFar = model; + let features = Mapped(_Features, samples); + let actualLabels = Mapped(_Label, samples); + + let inferredLabels = InferredLabels( + model::Bias, + EstimateClassificationProbabilities( + options::Tolerance, model::Parameters, gates, + features, options::NMeasurements + ) + ); //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters - for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { - let miniBatch = ExtractMiniBatch(miniBatchSize, ixLoc, missLocations, samples); - let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, 
gates, lrate, measCount); - if (Microsoft.Quantum.Math.AbsD(utility) > 0.0000001) { - //There had been some parameter update - if (utility > 0.0) { //good parameter update - set paramCurrent = upParam; - let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); - set biasCurrent = _UpdatedBias(plsCurrent, bias, tolerance); - let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); - if (m1 < mBest + HARDCODEDunderage) { - //we allow limited non-greediness - if (m1 < mBest) { - set hBest = h1; - set mBest = m1; - set paramBest = paramCurrent; - set biasBest = biasCurrent; - } - } else { - //otherwise we scrap the parameter update - set paramCurrent = paramBest; - set biasCurrent = biasBest; - } + let minibatches = Mapped( + Subarray(_, samples), + Chunks( + options::MinibatchSize, + Misclassifications(inferredLabels, actualLabels) + ) + ); + for (minibatch in minibatches) { + let (utility, updatedParameters) = _RunSingleTrainingStep( + minibatch, options, bestSoFar::Parameters, gates + ); + if (utility > 0.0000001) { + // There has been some good parameter update. + // Check if it actually improves things, and if so, + // commit it. + let probabilities = EstimateClassificationProbabilities( + options::Tolerance, updatedParameters, gates, + features, options::NMeasurements + ); + let updatedBias = _UpdatedBias( + Zip(probabilities, actualLabels), model::Bias, options::Tolerance + ); + let updatedLabels = InferredLabels( + updatedBias, probabilities + ); + let nMisses = Length(Misclassifications( + updatedLabels, actualLabels + )); + if (nMisses < nBestMisses) { + set nBestMisses = nMisses; + set bestSoFar = SequentialModel(updatedParameters, updatedBias); } } } - return ((hBest, mBest), (biasBest, paramBest)); + return (nBestMisses, bestSoFar); } - //Make some oblivious gradien descent steps without checking the prediction quality - operation OneUncontrolledStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int): ((Int,Int),(Double,Double[])) - { - let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - mutable biasBest = _UpdatedBias(pls, bias, tolerance); - let (h0,m0) = TallyHitsMisses(pls,biasBest); // ClassificationScoreSimulated(samples, schedScore, param, gates, bias); //Deprecated - mutable hCur = h0; - mutable mCur = m0; - let missLocations = MissLocations(schedScore, pls, biasBest); - - mutable paramBest = param; - mutable paramCurrent = paramBest; - mutable biasCurrent = biasBest; - - //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters - for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { - let miniBatch = ExtractMiniBatch(miniBatchSize,ixLoc,missLocations,samples); - let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); - if (AbsD(utility) > 0.0000001) { - //There had been some parameter update - if (utility > 0.0) { //good parameter update - set paramCurrent = upParam; - let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); - set biasCurrent = _UpdatedBias(plsCurrent, bias, tolerance); - let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); - set hCur = h1; - set mCur = m1; - } - - } - - } - return ((hCur, 
mCur),(biasCurrent,paramCurrent)); - } //OneUncontrolledStochasticTrainingEpoch + /// # Summary + /// Randomly rescales an input to either grow or shrink by a given factor. + operation _RandomlyRescale(scale : Double, value : Double) : Double { + return value * ( + 1.0 + scale * (Random([0.5, 0.5]) > 0 ? 1.0 | -1.0) + ); + } /// # Summary /// Run a full circuit training loop on a subset of data samples @@ -369,78 +296,100 @@ namespace Microsoft.Quantum.MachineLearning { /// # Output /// ((no.hits,no.misses),(opt.bias,opt.parameters)) /// - operation StochasticTrainingLoop(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSizeInital: Int, param: Double[], gates: GateSequence, bias: Double, lrateInitial: Double, maxEpochs: Int, tol: Double, measCount: Int): ((Int,Int),(Double,Double[])) - { - //const - let manyNoops = 4; + operation TrainSequentialClassifierAtModel( + gates : GateSequence, + model : SequentialModel, + samples : LabeledSample[], + options : TrainingOptions, + schedule : SamplingSchedule, + periodScore : Int + ) + : SequentialModel { //const - let relFuzz = 0.01; - let HARDCODEDmaxNoops = 2*manyNoops; - mutable pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - mutable biasBest = _UpdatedBias(pls, bias, tol); - let (h0, m0) = TallyHitsMisses(pls,biasBest); - mutable hBest = h0; - mutable mBest = m0; - mutable paramBest = param; - mutable paramCurrent = param; - mutable biasCurrent = biasBest; + let nSamples = Length(samples); + let features = Mapped(_Features, samples); + let actualLabels = Mapped(_Label, samples); + let probabilities = EstimateClassificationProbabilities( + options::Tolerance, model::Parameters, gates, + features, options::NMeasurements + ); + mutable bestSoFar = model + w/ Bias <- _UpdatedBias( + Zip(probabilities, actualLabels), + model::Bias, options::Tolerance + ); + let inferredLabels = InferredLabels( + bestSoFar::Bias, probabilities + ); + mutable nBestMisses = Length( + Misclassifications(inferredLabels, actualLabels) + ); + mutable current = bestSoFar; //reintroducing learning rate heuristics - mutable lrate = lrateInitial; - mutable batchSize = miniBatchSizeInital; - mutable noopCount = 0; - mutable upBias = biasCurrent; - mutable upParam = paramCurrent; - for (ep in 1..maxEpochs) { - let ((h1,m1),(upB,upP)) = OneStochasticTrainingEpoch(samples, sched, schedScore, periodScore, - batchSize, paramCurrent, gates, biasCurrent, lrate, tol, measCount, hBest, mBest); - set upBias = upB; - set upParam = upP; - if (m1 < mBest) - { - set hBest = h1; - set mBest = m1; - set paramBest = upParam; - set biasBest = upBias; - if (IntAsDouble (mBest)/IntAsDouble (mBest+hBest)< tol) //Terminate based on tolerance - { - return ((hBest,mBest),(biasBest,paramBest)); + mutable lrate = options::LearningRate; + mutable batchSize = options::MinibatchSize; + + // Keep track of how many times a bias update has stalled out. 
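        // A "stall" is an epoch whose proposed update leaves both the bias and the
        // parameters (nearly) unchanged. Each stall bumps the minibatch size and heats up
        // the learning rate; past half of options::MaxStalls the proposed model is also
        // randomly rescaled, and past options::MaxStalls training stops early with the
        // best model found so far.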
+ mutable nStalls = 0; + + for (ep in 1..options::MaxEpochs) { + let (nMisses, proposedUpdate) = _RunSingleTrainingEpoch( + samples, schedule, periodScore, + options + w/ LearningRate <- lrate + w/ MinibatchSize <- batchSize, + current, gates, + nBestMisses + ); + if (nMisses < nBestMisses) { + set nBestMisses = nMisses; + set bestSoFar = proposedUpdate; + if (IntAsDouble(nMisses) / IntAsDouble(nSamples) < options::Tolerance) { //Terminate based on tolerance + return bestSoFar; } - set noopCount = 0; //Reset the counter of consequtive noops - set lrate = lrateInitial; - set batchSize = miniBatchSizeInital; + set nStalls = 0; //Reset the counter of consequtive noops + set lrate = options::LearningRate; + set batchSize = options::MinibatchSize; } - if (NearlyEqualD(biasCurrent,upBias) and _AllNearlyEqualD(paramCurrent,upParam)) - { - set noopCount = noopCount+1; - if (noopCount > manyNoops) - { - if (noopCount > HARDCODEDmaxNoops) - { - return ((hBest,mBest),(biasBest,paramBest)); //Too many non-steps. Continuation makes no sense - } - else - { - set upBias = randomize(upBias, relFuzz); - set upParam = ForEach(randomize(_, relFuzz), upParam); - } + + if ( + NearlyEqualD(current::Bias, proposedUpdate::Bias) and _AllNearlyEqualD(current::Parameters, proposedUpdate::Parameters) + ) { + set nStalls += 1; + // If we're more than halfway through our maximum allowed number of stalls, + // exit early with the best we actually found. + if (nStalls > options::MaxStalls) { + return bestSoFar; //Too many non-steps. Continuation makes no sense } - set batchSize = noopCount; //batchSize + 1; //Try to fuzz things up with smaller batch count + + // Otherwise, heat up the learning rate and batch size. + set batchSize = nStalls; //batchSize + 1; //Try to fuzz things up with smaller batch count //and heat up a bit - set lrate = 1.25*lrate; - } - else - { - set noopCount = 0; //Reset the counter of consequtive noops - set lrate = lrateInitial; - set batchSize = miniBatchSizeInital; + set lrate *= 1.25; + + // If we stalled out, we'll also randomly rescale our parameters + // and bias before updating. + if (nStalls > options::MaxStalls / 2) { + set current = SequentialModel( + ForEach(_RandomlyRescale(options::StochasticRescaleFactor, _), proposedUpdate::Parameters), + _RandomlyRescale(options::StochasticRescaleFactor, proposedUpdate::Bias) + ); + } + } else { + // If we learned successfully this iteration, reset the number of + // stalls so far. + set nStalls = 0; //Reset the counter of consequtive noops + set lrate = options::LearningRate; + set batchSize = options::MinibatchSize; + + // Since we didn't stall out, we can set the parameters and bias + // as normal, without randomizing. 
+ set current = proposedUpdate; } - set paramCurrent = upParam; - set biasCurrent = upBias; } - return ((hBest,mBest),(biasBest,paramBest)); + return bestSoFar; } } diff --git a/MachineLearning/src/Runtime/Types.qs b/MachineLearning/src/Runtime/Types.qs index 759acbc4094..3d8ca3ab9a5 100644 --- a/MachineLearning/src/Runtime/Types.qs +++ b/MachineLearning/src/Runtime/Types.qs @@ -89,6 +89,25 @@ namespace Microsoft.Quantum.MachineLearning { NMisclassifications: Int ); + newtype TrainingOptions = ( + LearningRate: Double, + Tolerance: Double, + MinibatchSize: Int, + NMeasurements: Int, + MaxEpochs: Int, + MaxStalls: Int, + StochasticRescaleFactor: Double + ); + + function DefaultTrainingOptions() : TrainingOptions { + return TrainingOptions( + 0.1, 0.005, 15, 10000, 16, 8, 0.01 + ); + } + newtype SequentialModel = ( + Parameters: Double[], + Bias: Double + ); } diff --git a/MachineLearning/src/Runtime/Utils.qs b/MachineLearning/src/Runtime/Utils.qs index 6a97180404d..c412f05461d 100644 --- a/MachineLearning/src/Runtime/Utils.qs +++ b/MachineLearning/src/Runtime/Utils.qs @@ -9,4 +9,9 @@ namespace Microsoft.Quantum.MachineLearning { return Length(v1) == Length(v2) and All(NearlyEqualD, Zip(v1, v2)); } + operation _TailMeasurement(nQubits : Int) : (Qubit[] => Result) { + let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; + return Measure(paulis, _); + } + } diff --git a/MachineLearning/src/Runtime/Validation.qs b/MachineLearning/src/Runtime/Validation.qs index fca2e87397a..f146eb4427a 100644 --- a/MachineLearning/src/Runtime/Validation.qs +++ b/MachineLearning/src/Runtime/Validation.qs @@ -1,42 +1,19 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Logical; open Microsoft.Quantum.Canon; - function NMismatches(proposed: Int[], actual: Int[]): Int { - mutable count = 0; - for ((proposedLabel, actualLabel) in Zip(proposed, actual)) { - if (proposedLabel != actualLabel) { - set count += 1; - } - } - return count; + function Misclassifications(inferredLabels : Int[], actualLabels : Int[]) + : Int[] { + return Where( + NotEqualI, + Zip(inferredLabels, actualLabels) + ); } - /// # Summary - /// tallies hits and misses off a list of probability estimates - /// - /// # Input - /// ## pls - /// a list of estimated probabilities with the corresponding class labels - /// - /// ## bias - /// bias on record - /// - /// # Output - /// (no.hits, no.misses) pair - /// - function TallyHitsMisses(pls : (Double, Int)[], bias : Double) : (Int, Int) { - mutable hits = 0; - mutable misses = 0; - for ((classificationProbability, label) in pls) { - if (label == InferredLabel(bias, classificationProbability)) { - set hits += 1; - } else { - set misses += 1; - } - } - return (hits, misses); + function NMisclassifications(proposed: Int[], actual: Int[]): Int { + return Length(Misclassifications(proposed, actual)); } /// # Summary @@ -76,22 +53,34 @@ namespace Microsoft.Quantum.MachineLearning { { let schValidate = unFlattenSchedule(validationSchedule); let results = ValidateModel( - tolerance, nQubits, Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), - schValidate, unFlattenGateSequence(gates), - parameters, bias, nMeasurements + unFlattenGateSequence(gates), + SequentialModel(parameters, bias), + Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), + tolerance, nMeasurements, + schValidate ); return results::NMisclassifications; } - operation ValidateModel(tolerance: Double, nQubits: Int, samples : 
LabeledSample[], validationSchedule: SamplingSchedule, gates: GateSequence, parameters: Double[], bias:Double, nMeasurements: Int) : ValidationResults - { + operation ValidateModel( + gates: GateSequence, + model : SequentialModel, + samples : LabeledSample[], + tolerance: Double, + nMeasurements: Int, + validationSchedule: SamplingSchedule + ) + : ValidationResults { let features = Mapped(_Features, samples); let labels = Sampled(validationSchedule, Mapped(_Label, samples)); - let probsValidation = EstimateClassificationProbabilitiesClassicalData(tolerance, features, validationSchedule, nQubits, gates, parameters, nMeasurements); - let localPL = InferredLabels(bias, probsValidation); - let nMismatches = NMismatches(localPL, labels); + let probabilities = EstimateClassificationProbabilities( + tolerance, model::Parameters, gates, + Sampled(validationSchedule, features), nMeasurements + ); + let localPL = InferredLabels(model::Bias, probabilities); + let nMisclassifications = NMisclassifications(localPL, labels); return ValidationResults( - nMismatches + nMisclassifications ); } diff --git a/Standard/src/Arrays/Arrays.qs b/Standard/src/Arrays/Arrays.qs index 13a111729e5..bb45ed9bcc3 100644 --- a/Standard/src/Arrays/Arrays.qs +++ b/Standard/src/Arrays/Arrays.qs @@ -249,6 +249,33 @@ namespace Microsoft.Quantum.Arrays { | inputArray + padArray; // Padded at tail. } + + /// # Summary + /// Splits an array into multiple parts of equal length. + /// + /// # Input + /// ## nElements + /// The length of each chunk. + /// ## arr + /// The array to be split. + /// + /// # Output + /// A array containing each chunk of the original array. + /// + /// # Remarks + /// Note that the last element of the output may be shorter + /// than `nElements` if `Length(arr)` is not divisible by `nElements`. + function Chunks<'T>(nElements : Int, arr : 'T[]) : 'T[][] { + mutable output = new 'T[][0]; + mutable remaining = arr; + while (not IsEmpty(remaining)) { + let nElementsToTake = MinI(Length(remaining), nElements); + set output += [remaining[...nElementsToTake - 1]]; + set remaining = remaining[nElementsToTake...]; + } + return output; + } + /// # Summary /// Splits an array into multiple parts. /// diff --git a/Standard/src/Arrays/Filter.qs b/Standard/src/Arrays/Filter.qs index baa86f3c742..cadf8d58dae 100644 --- a/Standard/src/Arrays/Filter.qs +++ b/Standard/src/Arrays/Filter.qs @@ -2,6 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Arrays { + open Microsoft.Quantum.Canon; /// # Summary /// Given an array and a predicate that is defined @@ -38,4 +39,30 @@ namespace Microsoft.Quantum.Arrays { return Subarray(idxArray[0 .. totalFound - 1], array); } + /// # Summary + /// Given a predicate and an array, returns the indices of that + /// array where the predicate is true. + /// + /// # Type Parameters + /// ## 'T + /// The type of `array` elements. + /// + /// # Input + /// ## predicate + /// A function from `'T` to Boolean that is used to filter elements. + /// ## array + /// An array of elements over `'T`. + /// + /// # Output + /// An array of indices where `predicate` is true. + function Where<'T>(predicate : ('T -> Bool), array : 'T[]) : Int[] { + return Mapped( + Fst, + Filtered( + Snd, + Enumerated(Mapped(predicate, array)) + ) + ); + } + } diff --git a/Standard/tests/ArrayTests.qs b/Standard/tests/ArrayTests.qs index 256b42468dd..556c3c51fe6 100644 --- a/Standard/tests/ArrayTests.qs +++ b/Standard/tests/ArrayTests.qs @@ -1,13 +1,14 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Logical; open Microsoft.Quantum.Diagnostics; open Microsoft.Quantum.Canon; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Arrays; - - function ZipTest () : Unit { + @Test("QuantumSimulator") + function ZipTest() : Unit { let left = [1, 2, 101]; let right = [PauliY, PauliI]; @@ -26,6 +27,7 @@ namespace Microsoft.Quantum.Tests { } + @Test("QuantumSimulator") function LookupTest () : Unit { let array = [1, 12, 71, 103]; @@ -38,8 +40,28 @@ namespace Microsoft.Quantum.Tests { EqualityFactI(fn(1), 12, $"fn(1) did not return array[1]"); } + function _AllEqualI(expected : Int[], actual : Int[]) : Bool { + return All(EqualI, Zip(expected, actual)); + } + + @Test("QuantumSimulator") + function ChunksTest() : Unit { + let data = [10, 11, 12, 13, 14, 15]; + + // 2 × 3 case. + Fact(All(_AllEqualI, Zip( + [[10, 11], [12, 13], [14, 15]], + Chunks(2, data) + )), "Wrong chunks in 2x3 case."); + + // Case with some leftovers. + Fact(All(_AllEqualI, Zip( + [[10, 11, 12, 13], [14, 15]], + Chunks(4, data) + )), "Wrong chunks in case with leftover elements."); + } - function ConstantArrayTestHelper (x : Int) : Int { + function _Squared(x : Int) : Int { return x * x; } @@ -52,7 +74,7 @@ namespace Microsoft.Quantum.Tests { let ignore = Mapped(NearEqualityFactD(_, 2.17), dblArray); // Stress test by making an array of Int -> Int. - let fnArray = ConstantArray(7, ConstantArrayTestHelper); + let fnArray = ConstantArray(7, _Squared); EqualityFactI(Length(fnArray), 7, $"ConstantArray(Int, Int -> Int) had the wrong length."); EqualityFactI(fnArray[3](7), 49, $"ConstantArray(Int, Int -> Int) had the wrong value."); } From 67cd36914e6be9aa5047c518bc2999ed60be18df Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Wed, 8 Jan 2020 13:56:24 -0800 Subject: [PATCH 11/43] Update QML feature to master. (#198) * Clarify the restriction on the number of bits for IntAsBoolArray (#171) * Clarify the restriction on the number of bits for IntAsBoolArray This should fix #166 by providing a more specific error message. * Update Standard/src/Convert/Convert.qs Co-Authored-By: Chris Granade * Allow to have bits = 0 Looks like our tests assume that number = 0 with bits = 0 is a valid scenario; updating the change to account for that * Package updates (#188) * Quantum AND gates (a.k.a. CCNOT with constant target) (#186) * Two AND gate implementations. * Added test case. * Formatting. * Code formatting. * Update Standard/src/Canon/And.qs Co-Authored-By: Chris Granade * Assertion for 0-target. * Added DOI to references. * Named application for CCNOTop. * Rename operations. * Add Test attribute. * Add links to arXiv. * Rename operations. * Better assertion for 0-target. * Fix bug in LowDepthAnd. * Docs. * Doc string convention. * Controlled variant for `ApplyAnd`. * Controlled AndLowDepth. * Adjoint Controlled LowDepthAnd. * References. * Simplify code. * Apply suggestions from code review Co-Authored-By: Chris Granade * Integrate comment. * Removed comment ref to IncrementByIntegerPhaseLE (#189) There appears to be no function IncrementByIntegerPhaseLE, and I guess it is covered by ApplyLEOperationOnPhaseLE. Co-authored-by: Chris Granade * New Hadamard and SWAP test operations. (#196) * First work on Hadamard and SWAP test operations. * (c) header and typo fix. * Fixed typo with placement of phase shift. * Put public operations above private. * Added tests for new operations. * Added API documentation comments. 
* Newline at end of file. * Refactor AA namespace to use Q# style guide (#197) * Began simplifying AA interface. * Expose traditional AA as new public operation. * Removed rest of "AmpAmp" prefix. * Resolve deprecation warning. * Switching to using the new Sdk (#194) * Minor doc fixes (#190) * Minor doc fixes * Minor doc cleanup There are probably still some unstated assumptions on the algorithms. * Add "# Description" for MultiplyByModularInteger ...because there is inline math. Co-Authored-By: Chris Granade * "unitary operation" instead of "unitary operator" Co-Authored-By: Chris Granade * Add "# Description", remove refs in "# Summary" Co-authored-by: Chris Granade * Fix build by updating QML projects to use SDK. Co-authored-by: Mariia Mykhailova Co-authored-by: bettinaheim <34236215+bettinaheim@users.noreply.github.com> Co-authored-by: Mathias Soeken Co-authored-by: numpde <21158052+numpde@users.noreply.github.com> --- Build/props/tests.props | 17 + Build/steps.yml | 4 +- Chemistry/src/DataModel/DataModel.csproj | 1 - Chemistry/src/Jupyter/Jupyter.csproj | 2 - Chemistry/src/Runtime/Runtime.csproj | 16 +- .../tests/ChemistryTests/QSharpTests.csproj | 27 +- .../UnitaryCoupledClusterTests.qs | 2 +- .../tests/DataModelTests/CSharpTests.csproj | 10 +- .../tests/SamplesTests/SamplesTests.csproj | 10 +- .../tests/SystemTests/SystemTests.csproj | 30 +- .../src/DataModel/DataModel.csproj | 6 +- MachineLearning/src/Runtime/Runtime.csproj | 16 +- .../tests/MachineLearningTests.csproj | 18 +- Numerics/src/Numerics.csproj | 13 +- Numerics/tests/NumericsTests.csproj | 24 +- .../AmplitudeAmplification.qs | 389 ++++++++---------- .../CommonOracles.qs | 19 +- .../src/AmplitudeAmplification/Convert.qs | 47 +++ .../src/AmplitudeAmplification/Deprecated.qs | 112 +++++ .../StandardAlgorithms.qs | 78 ++++ Standard/src/AmplitudeAmplification/Types.qs | 9 +- Standard/src/Arithmetic/Increment.qs | 3 - Standard/src/Arithmetic/Modular.qs | 68 +-- Standard/src/Canon/And.qs | 367 +++++++++++++++++ .../Canon/Combinators/ApplyMultiControlled.qs | 6 +- .../Characterization/Distinguishability.qs | 234 +++++++++++ Standard/src/Oracles/Convert.qs | 71 ++-- Standard/src/Oracles/Types.qs | 14 +- .../src/Preparation/UniformSuperposition.qs | 12 +- Standard/src/Standard.csproj | 13 +- Standard/tests/ANDTests.qs | 73 ++++ .../DistinguishabilityTests.qs | 60 +++ Standard/tests/Standard.Tests.csproj | 21 +- updateQDKVersion.sh | 7 +- 34 files changed, 1331 insertions(+), 468 deletions(-) create mode 100644 Build/props/tests.props rename Standard/src/{Oracles => AmplitudeAmplification}/CommonOracles.qs (69%) create mode 100644 Standard/src/AmplitudeAmplification/Convert.qs create mode 100644 Standard/src/AmplitudeAmplification/Deprecated.qs create mode 100644 Standard/src/AmplitudeAmplification/StandardAlgorithms.qs create mode 100644 Standard/src/Canon/And.qs create mode 100644 Standard/src/Characterization/Distinguishability.qs create mode 100644 Standard/tests/ANDTests.qs create mode 100644 Standard/tests/Characterization/DistinguishabilityTests.qs diff --git a/Build/props/tests.props b/Build/props/tests.props new file mode 100644 index 00000000000..3a231bc74cc --- /dev/null +++ b/Build/props/tests.props @@ -0,0 +1,17 @@ + + + + + false + + + + + + + + + + + + diff --git a/Build/steps.yml b/Build/steps.yml index 2c03754cc15..3f76297789e 100644 --- a/Build/steps.yml +++ b/Build/steps.yml @@ -1,10 +1,10 @@ steps: - task: UseDotNet@2 - displayName: 'Use .NET Core SDK 3.0.100' + displayName: 'Use .NET Core SDK 3.1.100' inputs: 
packageType: sdk - version: '3.0.100' + version: '3.1.100' - powershell: ./build.ps1 diff --git a/Chemistry/src/DataModel/DataModel.csproj b/Chemistry/src/DataModel/DataModel.csproj index ef1a835a77e..5f7d7dba3ea 100644 --- a/Chemistry/src/DataModel/DataModel.csproj +++ b/Chemistry/src/DataModel/DataModel.csproj @@ -35,7 +35,6 @@ - diff --git a/Chemistry/src/Jupyter/Jupyter.csproj b/Chemistry/src/Jupyter/Jupyter.csproj index 5a480fa6b97..3f77384b03c 100644 --- a/Chemistry/src/Jupyter/Jupyter.csproj +++ b/Chemistry/src/Jupyter/Jupyter.csproj @@ -26,8 +26,6 @@ - - diff --git a/Chemistry/src/Runtime/Runtime.csproj b/Chemistry/src/Runtime/Runtime.csproj index 4eee1e92205..8e8b3c09c51 100644 --- a/Chemistry/src/Runtime/Runtime.csproj +++ b/Chemistry/src/Runtime/Runtime.csproj @@ -1,13 +1,10 @@ - + + netstandard2.1 Microsoft.Quantum.Chemistry.Runtime - bin\$(BuildConfiguration)\$(PlatformTarget)\$(AssemblyName).xml - x64 - - - - True + true + false @@ -15,11 +12,12 @@ - + - + + diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj index 94bfd34ffc9..ef74163503e 100644 --- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj +++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj @@ -1,28 +1,15 @@ - + + + + netcoreapp3.0 - x64 - false + false - - - - - - - - - - all - runtime; build; native; contentfiles; analyzers; buildtransitive - - - - - - + + diff --git a/Chemistry/tests/ChemistryTests/UnitaryCoupledClusterTests.qs b/Chemistry/tests/ChemistryTests/UnitaryCoupledClusterTests.qs index 753ed7b85d8..b9bcc85c262 100644 --- a/Chemistry/tests/ChemistryTests/UnitaryCoupledClusterTests.qs +++ b/Chemistry/tests/ChemistryTests/UnitaryCoupledClusterTests.qs @@ -79,7 +79,7 @@ namespace Microsoft.Quantum.Chemistry.Tests { function _DoublesToComplexPolar(input: Double[]) : ComplexPolar[]{ mutable arr = new ComplexPolar[Length(input)]; for(idx in 0..Length(input)-1){ - set arr w/= idx <- ComplexToComplexPolar(Complex((input[idx],0.))); + set arr w/= idx <- ComplexAsComplexPolar(Complex((input[idx],0.))); } return arr; } diff --git a/Chemistry/tests/DataModelTests/CSharpTests.csproj b/Chemistry/tests/DataModelTests/CSharpTests.csproj index 59db46d6537..20afc319ae5 100644 --- a/Chemistry/tests/DataModelTests/CSharpTests.csproj +++ b/Chemistry/tests/DataModelTests/CSharpTests.csproj @@ -18,14 +18,8 @@ - - - - - - @@ -44,6 +38,10 @@ + + + + diff --git a/Chemistry/tests/SamplesTests/SamplesTests.csproj b/Chemistry/tests/SamplesTests/SamplesTests.csproj index 410bb53582c..d60d4de76e0 100644 --- a/Chemistry/tests/SamplesTests/SamplesTests.csproj +++ b/Chemistry/tests/SamplesTests/SamplesTests.csproj @@ -12,14 +12,8 @@ - - - - - - @@ -38,6 +32,10 @@ + + + + PreserveNewest diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj index 071091e0595..80bc481579a 100644 --- a/Chemistry/tests/SystemTests/SystemTests.csproj +++ b/Chemistry/tests/SystemTests/SystemTests.csproj @@ -1,9 +1,10 @@ - + + + netcoreapp3.0 - x64 - false + false @@ -11,23 +12,7 @@ PreserveNewest - - - - - - - - - - - - all - runtime; build; native; contentfiles; analyzers; buildtransitive - - - - + PreserveNewest @@ -43,6 +28,10 @@ + + + + @@ -50,3 +39,4 @@ + diff --git a/MachineLearning/src/DataModel/DataModel.csproj b/MachineLearning/src/DataModel/DataModel.csproj index 0b9c2b15adb..3aadd057759 100644 --- a/MachineLearning/src/DataModel/DataModel.csproj +++ b/MachineLearning/src/DataModel/DataModel.csproj @@ -5,6 +5,7 @@ x64 
Microsoft.Quantum.MachineLearning.DataModel bin\$(BuildConfiguration)\$(PlatformTarget)\$(AssemblyName).xml + true @@ -31,11 +32,6 @@ - - - - - diff --git a/MachineLearning/src/Runtime/Runtime.csproj b/MachineLearning/src/Runtime/Runtime.csproj index c3da71b95ce..60fa28844d9 100644 --- a/MachineLearning/src/Runtime/Runtime.csproj +++ b/MachineLearning/src/Runtime/Runtime.csproj @@ -1,18 +1,16 @@ - + netstandard2.1 x64 Microsoft.Quantum.MachineLearning.Runtime + true + + false - - True - - - - - - diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj index e8620296bed..d2f2b5afd76 100644 --- a/MachineLearning/tests/MachineLearningTests.csproj +++ b/MachineLearning/tests/MachineLearningTests.csproj @@ -1,23 +1,17 @@ - + + + + netcoreapp3.0 - x64 - false + Microsoft.Quantum.Standard.Tests + false - - - - - - - - - diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj index e385cfc0376..d0798426a3d 100644 --- a/Numerics/src/Numerics.csproj +++ b/Numerics/src/Numerics.csproj @@ -1,8 +1,10 @@ - + + netstandard2.1 Microsoft.Quantum.Numerics - x64 + true + false @@ -30,10 +32,9 @@ - - - - + + + diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj index a8ba4731346..62542b6e1e0 100644 --- a/Numerics/tests/NumericsTests.csproj +++ b/Numerics/tests/NumericsTests.csproj @@ -1,28 +1,14 @@ - - - netcoreapp3.0 - x64 - false - latest - + + + - 0162 + netcoreapp3.0 + false - - - - - - - all - runtime; build; native; contentfiles; analyzers; buildtransitive - - - diff --git a/Standard/src/AmplitudeAmplification/AmplitudeAmplification.qs b/Standard/src/AmplitudeAmplification/AmplitudeAmplification.qs index f6564b569ce..dde60e11be4 100644 --- a/Standard/src/AmplitudeAmplification/AmplitudeAmplification.qs +++ b/Standard/src/AmplitudeAmplification/AmplitudeAmplification.qs @@ -2,161 +2,44 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.AmplitudeAmplification { + open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Convert; open Microsoft.Quantum.Math; open Microsoft.Quantum.Canon; open Microsoft.Quantum.Oracles; - /// # Summary - /// Converts phases specified as single-qubit rotations to phases - /// specified as partial reflections. - /// - /// # Input - /// ## rotPhases - /// Array of single-qubit rotations to be converted to partial - /// reflections. - /// - /// # Output - /// An operation that implements phases specified as partial reflections. - /// - /// # References - /// We use the convention in - /// - [ *G.H. Low, I. L. Chuang* ](https://arxiv.org/abs/1707.05391) - /// for relating single-qubit rotation phases to reflection operator phases. - function AmpAmpRotationToReflectionPhases (rotPhases : RotationPhases) : ReflectionPhases - { - let nPhasesRot = Length(rotPhases!); - let nPhasesRef = (nPhasesRot + 1) / 2; - - if (nPhasesRot % 2 == 0) - { - fail $"Number of rotations must be odd."; - } - - mutable phasesTarget = new Double[nPhasesRef]; - mutable phasesStart = new Double[nPhasesRef]; - set phasesTarget w/= 0 <- ((rotPhases!)[0] - (rotPhases!)[1]) - PI(); - set phasesStart w/= 0 <- -(rotPhases!)[0] + 0.5 * PI(); - - for (idxPhases in 1 .. 
nPhasesRef - 2) - { - set phasesTarget w/= idxPhases <- ((rotPhases!)[2 * idxPhases] - (rotPhases!)[2 * idxPhases + 1]) - PI(); - set phasesStart w/= idxPhases <- ((rotPhases!)[2 * idxPhases - 1] - (rotPhases!)[2 * idxPhases]) + PI(); - } - - set phasesTarget w/= nPhasesRef - 1 <- (rotPhases!)[2 * nPhasesRef - 2] - 0.5 * PI(); - set phasesStart w/= nPhasesRef - 1 <- ((rotPhases!)[2 * nPhasesRef - 3] - (rotPhases!)[2 * nPhasesRef - 2]) + PI(); - return ReflectionPhases(phasesStart, phasesTarget); - } - - - /// # Summary - /// Computes partial reflection phases for standard amplitude - /// amplification. - /// - /// # Input - /// ## nIterations - /// Number of amplitude amplification iterations to generate partial - /// reflection phases for. - /// - /// # Output - /// An operation that implements phases specified as partial reflections - /// - /// # Remarks - /// All phases are $\pi$, except for the first reflection about the start - /// state and the last reflection about the target state, which are $0$. - function AmpAmpPhasesStandard (nIterations : Int) : ReflectionPhases - { - mutable phasesTarget = new Double[nIterations + 1]; - mutable phasesStart = new Double[nIterations + 1]; - - for (idxPhases in 0 .. nIterations) - { - set phasesTarget w/= idxPhases <- PI(); - set phasesStart w/= idxPhases <- PI(); - } - - set phasesTarget w/= nIterations <- 0.0; - set phasesStart w/= 0 <- 0.0; - return ReflectionPhases(phasesStart, phasesTarget); - } - - - // We use the phases in "Fixed-Point Amplitude Amplification with an - // Optimal Number of Queires" [YoderLowChuang2014] - // See also "Methodology of composite quantum gates" [LowYoderChuang2016] - // for phases in the `RotationPhases` format - - /// # Summary - /// Computes partial reflection phases for fixed-point amplitude - /// amplification. - /// - /// # Input - /// ## nQueries - /// Number of queries to the state preparation oracle. Must be an odd - /// integer. - /// ## successMin - /// Target minimum success probability. - /// - /// # Output - /// Array of phases that can be used in fixed-point amplitude amplification - /// quantum algorithm implementation. - /// - /// # References - /// We use the phases in "Fixed-Point Amplitude Amplification with - /// an Optimal Number of Queries" - /// - [YoderLowChuang2014](https://arxiv.org/abs/1409.3305) - /// See also "Methodology of composite quantum gates" - /// - [LowYoderChuang2016](https://arxiv.org/abs/1603.03996) - /// for phases in the `RotationPhases` format. - function AmpAmpPhasesFixedPoint (nQueries : Int, successMin : Double) : ReflectionPhases - { - mutable phasesRot = new Double[nQueries]; - let nQueriesDouble = IntAsDouble(nQueries); - set phasesRot w/= 0 <- 0.0; - let beta = Cosh((1.0 / nQueriesDouble) * ArcCosh(Sqrt(successMin))); - - for (idxPhases in 1 .. nQueries - 1) - { - set phasesRot w/= idxPhases <- phasesRot[idxPhases - 1] + 2.0 * ArcTan(Tan((((2.0 * 1.0) * IntAsDouble(idxPhases)) * PI()) / nQueriesDouble) * Sqrt(1.0 - beta * beta)); - } - - return AmpAmpRotationToReflectionPhases(RotationPhases(phasesRot)); - } - - /// # Summary /// Oblivious amplitude amplification by specifying partial reflections. 
/// /// # Input /// ## phases /// Phases of partial reflections - /// ## ancillaReflection - /// Reflection operator about start state of ancilla register + /// ## startStateReflection + /// Reflection operator about start state of auxiliary register /// ## targetStateReflection - /// Reflection operator about target state of ancilla register + /// Reflection operator about target state of auxiliary register /// ## signalOracle /// Unitary oracle $O$ of type `ObliviousOracle` that acts jointly on the - /// ancilla and system registers. - /// ## ancillaRegister - /// Ancilla register + /// auxiliary and system registers. + /// ## auxiliaryRegister + /// Auxiliary register /// ## systemRegister /// System register /// /// # Remarks - /// Given a particular ancilla start state $\ket{\text{start}}\_a$, a - /// particular ancilla target state $\ket{\text{target}}\_a$, and any + /// Given a particular auxiliary start state $\ket{\text{start}}\_a$, a + /// particular auxiliary target state $\ket{\text{target}}\_a$, and any /// system state $\ket{\psi}\_s$, suppose that /// \begin{align} /// O\ket{\text{start}}\_a\ket{\psi}\_s= \lambda\ket{\text{target}}\_a U \ket{\psi}\_s + \sqrt{1-|\lambda|^2}\ket{\text{target}^\perp}\_a\cdots /// \end{align} /// for some unitary $U$. /// By a sequence of reflections about the start and target states on the - /// ancilla register interleaved by applications of `signalOracle` and its + /// auxiliary register interleaved by applications of `signalOracle` and its /// adjoint, the success probability of applying U may be altered. /// - /// In most cases, `ancillaRegister` is initialized in the state $\ket{\text{start}}\_a$. + /// In most cases, `auxiliaryRegister` is initialized in the state $\ket{\text{start}}\_a$. /// /// # References /// See @@ -165,69 +48,67 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// See /// - [ *G.H. Low, I.L. Chuang* ](https://arxiv.org/abs/1610.06546) /// for a generalization to partial reflections. - operation AmpAmpObliviousByReflectionPhasesImpl (phases : ReflectionPhases, ancillaReflection : ReflectionOracle, targetStateReflection : ReflectionOracle, signalOracle : ObliviousOracle, ancillaRegister : Qubit[], systemRegister : Qubit[]) : Unit - { - body (...) - { - let (phasesAncilla, phasesTarget) = phases!; - let nphases = 2 * Length(phasesTarget); - - //FailOn(nphases != Length(phasesAncilla), "Phase array lengths not equal.") - if (phasesAncilla[0] != 0.0) - { - ancillaReflection!(phasesAncilla[0], ancillaRegister); + operation ApplyObliviousAmplitudeAmplification( + phases : ReflectionPhases, + startStateReflection : ReflectionOracle, + targetStateReflection : ReflectionOracle, + signalOracle : ObliviousOracle, + auxiliaryRegister : Qubit[], + systemRegister : Qubit[] + ) + : Unit is Adj + Ctl { + for ((startPhase, targetPhase) in Zip(phases!)) { + if (startPhase != 0.0) { + startStateReflection::ApplyReflection( + startPhase, auxiliaryRegister + ); } - - for (idxPhases in 1 .. 
nphases - 1) - { - let idxPhaseAncilla = idxPhases / 2; - let idxPhaseTarget = idxPhases / 2; - - if (idxPhases % 2 == 1) - { - signalOracle!(ancillaRegister, systemRegister); - - if (phasesTarget[idxPhaseTarget] != 0.0) - { - targetStateReflection!(phasesTarget[idxPhaseTarget], ancillaRegister); - } - } - else - { - Adjoint signalOracle!(ancillaRegister, systemRegister); - - if (phasesAncilla[idxPhaseAncilla] != 0.0) - { - ancillaReflection!(phasesAncilla[idxPhaseAncilla], ancillaRegister); - } + + within { + signalOracle!(auxiliaryRegister, systemRegister); + } apply { + if (targetPhase != 0.0) { + targetStateReflection!(targetPhase, auxiliaryRegister); } } } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; + // This gives us one extra application of Adjoint signalOracle!, so we + // apply the forward direction at the end. + signalOracle!(auxiliaryRegister, systemRegister); } - - + + + // NB [STYLE]: The name of this operation uses "From" as it is not a type + // conversion function ("As"), but something that constructs + // an operation from given information in a deterministic + // fashion. /// # Summary /// Returns a unitary that implements oblivious amplitude amplification by specifying for partial reflections. - function AmpAmpObliviousByReflectionPhases (phases : ReflectionPhases, ancillaReflection : ReflectionOracle, targetStateReflection : ReflectionOracle, signalOracle : ObliviousOracle) : ((Qubit[], Qubit[]) => Unit is Adj + Ctl) - { - return AmpAmpObliviousByReflectionPhasesImpl(phases, ancillaReflection, targetStateReflection, signalOracle, _, _); + function ObliviousAmplitudeAmplificationFromPartialReflections( + phases : ReflectionPhases, + startStateReflection : ReflectionOracle, + targetStateReflection : ReflectionOracle, + signalOracle : ObliviousOracle + ) + : ((Qubit[], Qubit[]) => Unit is Adj + Ctl) { + return ApplyObliviousAmplitudeAmplification( + phases, startStateReflection, targetStateReflection, signalOracle, + _, _ + ); } - - + + /// # Summary /// Oblivious amplitude amplification by oracles for partial reflections. /// /// # Input /// ## phases /// Phases of partial reflections - /// ## ancillaOracle - /// Unitary oracle $A$ that prepares ancilla start state + /// ## startStateOracle + /// Unitary oracle $A$ that prepares auxiliary start state /// ## signalOracle - /// Unitary oracle $O$ of type `ObliviousOracle` that acts jointly on the ancilla and system register + /// Unitary oracle $O$ of type `ObliviousOracle` that acts jointly on the + /// auxiliary and system register /// ## idxFlagQubit /// Index to single-qubit flag register /// @@ -235,23 +116,65 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// An operation that implements oblivious amplitude amplification based on partial reflections. /// /// # Remarks - /// This imposes stricter conditions on form of the ancilla start and target states than in `AmpAmpObliviousByReflectionPhases`. - /// It is assumed that $A\ket{0}\_f\ket{0}\_a= \ket{\text{start}}\_{fa}$ prepares the ancilla start state $\ket{\text{start}}\_{fa}$ from the computational basis $\ket{0}\_f\ket{0}$. + /// This imposes stricter conditions on form of the auxiliary start and target states than in `AmpAmpObliviousByReflectionPhases`. + /// It is assumed that $A\ket{0}\_f\ket{0}\_a= \ket{\text{start}}\_{fa}$ prepares the auxiliary start state $\ket{\text{start}}\_{fa}$ from the computational basis $\ket{0}\_f\ket{0}$. /// It is assumed that the target state is marked by $\ket{1}\_f$. 
/// It is assumed that /// \begin{align} /// O\ket{\text{start}}\_{fa}\ket{\psi}\_s= \lambda\ket{1}\_f\ket{\text{anything}}\_a\ket{\text{target}}\_s U \ket{\psi}\_s + \sqrt{1-|\lambda|^2}\ket{0}\_f\cdots, /// \end{align} /// for some unitary $U$. - function AmpAmpObliviousByOraclePhases (phases : ReflectionPhases, ancillaOracle : DeterministicStateOracle, signalOracle : ObliviousOracle, idxFlagQubit : Int) : ((Qubit[], Qubit[]) => Unit is Adj + Ctl) - { - let ancillaReflection = ReflectionStart(); + function ObliviousAmplitudeAmplificationFromStatePreparation( + phases : ReflectionPhases, + startStateOracle : DeterministicStateOracle, + signalOracle : ObliviousOracle, + idxFlagQubit : Int + ) + : ((Qubit[], Qubit[]) => Unit is Adj + Ctl) { + let startStateReflection = ReflectionStart(); let targetStateReflection = TargetStateReflectionOracle(idxFlagQubit); - let oracleObliviousNew = ObliviousOracleFromDeterministicStateOracle(ancillaOracle, signalOracle); - return AmpAmpObliviousByReflectionPhases(phases, ancillaReflection, targetStateReflection, oracleObliviousNew); + let obliviousSignalOracle = ObliviousOracleFromDeterministicStateOracle( + startStateOracle, signalOracle + ); + return ObliviousAmplitudeAmplificationFromPartialReflections( + phases, startStateReflection, targetStateReflection, obliviousSignalOracle + ); } - - + + /// # Summary + /// Applies amplitude amplification on a given register, using a given set + /// of phases and oracles to reflect about the initial and final states. + /// + /// # Input + /// ## phases + /// A set of phases describing the partial reflections at each step of the + /// amplitude amplification algorithm. See + /// @"microsoft.quantum.amplitudeamplification.standardreflectionphases" + /// for an example. + /// ## startStateReflection + /// An oracle that reflects about the initial state. + /// ## targetStateReflection + /// An oracle that reflects about the desired final state. + /// ## target + /// A register to perform amplitude amplification on. + operation ApplyAmplitudeAmplification( + phases : ReflectionPhases, + startStateReflection : ReflectionOracle, + targetStateReflection : ReflectionOracle, + target : Qubit[] + ) + : Unit is Adj + Ctl { + // Pass empty qubit array using fact that NoOp does nothing. + let systemRegister = new Qubit[0]; + let signalOracle = ObliviousOracle(NoOp<(Qubit[], Qubit[])>); + let op = ObliviousAmplitudeAmplificationFromPartialReflections( + phases, startStateReflection, targetStateReflection, signalOracle + ); + + op(target, systemRegister); + } + + /// # Summary /// Amplitude amplification by partial reflections. /// @@ -271,15 +194,25 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// # Remarks /// Amplitude amplification is a special case of oblivious amplitude amplification where there are no system qubits and the oblivious oracle is set to identity. /// In most cases, `startQubits` is initialized in the state $\ket{\text{start}}\_1$, which is the $-1$ eigenstate of `startStateReflection`. - function AmpAmpByReflectionsPhases (phases : ReflectionPhases, startStateReflection : ReflectionOracle, targetStateReflection : ReflectionOracle) : (Qubit[] => Unit is Adj + Ctl) - { + function AmplitudeAmplificationFromPartialReflections( + phases : ReflectionPhases, + startStateReflection : ReflectionOracle, + targetStateReflection : ReflectionOracle + ) + : (Qubit[] => Unit is Adj + Ctl) { // Pass empty qubit array using fact that NoOp does nothing. 
let qubitEmpty = new Qubit[0]; let signalOracle = ObliviousOracle(NoOp<(Qubit[], Qubit[])>); - return (AmpAmpObliviousByReflectionPhases(phases, startStateReflection, targetStateReflection, signalOracle))(_, qubitEmpty); + return (ObliviousAmplitudeAmplificationFromPartialReflections( + phases, startStateReflection, targetStateReflection, signalOracle + ))(_, qubitEmpty); } - - + + + // NB [STYLE]: The name of this operation uses "From" as it is not a type + // conversion function ("As"), but something that constructs + // an operation from given information in a deterministic + // fashion. /// # Summary /// Amplitude amplification by oracles for partial reflections. /// @@ -304,16 +237,22 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// \begin{align} /// A\ket{0}\_{f}\ket{0}\_s= \lambda\ket{1}\_f\ket{\text{target}}\_s + \sqrt{1-|\lambda|^2}\ket{0}\_f\cdots, /// \end{align} - /// In most cases, `flagQubit` and `ancillaRegister` is initialized in the state $\ket{0}\_{f}\ket{0}\_s$. - function AmpAmpByOraclePhases (phases : ReflectionPhases, stateOracle : StateOracle, idxFlagQubit : Int) : (Qubit[] => Unit is Adj + Ctl) - { - let qubitEmpty = new Qubit[0]; + /// In most cases, `flagQubit` and `auxiliaryRegister` are initialized in the state $\ket{0}\_{f}\ket{0}\_s$. + function AmplitudeAmplificationFromStatePreparation( + phases : ReflectionPhases, + stateOracle : StateOracle, + idxFlagQubit : Int + ) + : (Qubit[] => Unit is Adj + Ctl) { + let systemRegister = new Qubit[0]; let signalOracle = ObliviousOracle(NoOp<(Qubit[], Qubit[])>); - let ancillaOracle = DeterministicStateOracleFromStateOracle(idxFlagQubit, stateOracle); - return (AmpAmpObliviousByOraclePhases(phases, ancillaOracle, signalOracle, idxFlagQubit))(_, qubitEmpty); + let startStateOracle = DeterministicStateOracleFromStateOracle(idxFlagQubit, stateOracle); + return (ObliviousAmplitudeAmplificationFromStatePreparation( + phases, startStateOracle, signalOracle, idxFlagQubit + ))(_, systemRegister); } - - + + /// # Summary /// Standard Amplitude Amplification algorithm /// @@ -340,17 +279,21 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// \begin{align} /// \operatorname{AmpAmpByOracle}\ket{0}\_{f}\ket{0}\_s= \sin((2n+1)\sin^{-1}(\lambda))\ket{1}\_f\ket{\text{target}}\_s + \cdots\ket{0}\_f /// \end{align} - /// In most cases, `flagQubit` and `ancillaRegister` is initialized in the state $\ket{0}\_f\ket{0}\_a$. + /// In most cases, `flagQubit` and `auxiliaryRegister` are initialized in the state $\ket{0}\_f\ket{0}\_a$. /// /// # References /// - [ *G. Brassard, P. Hoyer, M. Mosca, A. Tapp* ](https://arxiv.org/abs/quant-ph/0005055) - function AmpAmpByOracle (nIterations : Int, stateOracle : StateOracle, idxFlagQubit : Int) : (Qubit[] => Unit is Adj + Ctl) - { - let phases = AmpAmpPhasesStandard(nIterations); - return AmpAmpByOraclePhases(phases, stateOracle, idxFlagQubit); + function StandardAmplitudeAmplification( + nIterations : Int, + stateOracle : StateOracle, + idxFlagQubit : Int + ) + : (Qubit[] => Unit is Adj + Ctl) { + let phases = StandardReflectionPhases(nIterations); + return AmplitudeAmplificationFromStatePreparation(phases, stateOracle, idxFlagQubit); } - - + + /// # Summary /// Fixed-Point Amplitude Amplification algorithm /// @@ -363,56 +306,50 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// # Remarks /// The startQubits must be in the $\ket{0 \cdots 0}$ state.
This operation iterates over a number of queries in powers of $2$ until either a maximal number of queries /// is reached, or the target state is found. - operation AmpAmpRUSByOracle (statePrepOracle : StateOracle, startQubits : Qubit[]) : Unit - { + operation ApplyFixedPointAmplification(statePrepOracle : StateOracle, startQubits : Qubit[]) + : Unit { // Should be a power of 2 let queriesMax = 999; let successMin = 0.99; mutable finished = Zero; mutable exponentMax = 0; mutable exponentCurrent = 0; - + //Complexity: Let \theta = \mathcal{O}(\sqrt{lambda}) // Number of Measurements = O( Log^2(1/\theta) ) // Number of Queries = O(1/\theta) - using (flagQubit = Qubit[1]) - { + using (flagQubit = Qubit[1]) { let qubits = flagQubit + startQubits; let idxFlagQubit = 0; - - repeat - { - if (2 ^ exponentMax > queriesMax) - { + + repeat { + if (2 ^ exponentMax > queriesMax) { fail $"Target state not found. Maximum number of queries exceeded."; } - - repeat - { + + repeat { let queries = 2 ^ exponentCurrent; - let phases = AmpAmpPhasesFixedPoint(queries, successMin); - (AmpAmpByOraclePhases(phases, statePrepOracle, idxFlagQubit))(qubits); + let phases = FixedPointReflectionPhases(queries, successMin); + (AmplitudeAmplificationFromStatePreparation(phases, statePrepOracle, idxFlagQubit))(qubits); set finished = M(flagQubit[0]); set exponentCurrent = exponentCurrent + 1; } until (finished == One or exponentCurrent > exponentMax) - fixup - { + fixup { // flagQubit is already in Zero for fixup to apply ResetAll(startQubits); } - + set exponentCurrent = 0; set exponentMax = exponentMax + 1; } until (finished == One) - fixup - { + fixup { ResetAll(startQubits); } } } - + } diff --git a/Standard/src/Oracles/CommonOracles.qs b/Standard/src/AmplitudeAmplification/CommonOracles.qs similarity index 69% rename from Standard/src/Oracles/CommonOracles.qs rename to Standard/src/AmplitudeAmplification/CommonOracles.qs index ba169bb7ae4..7bd2bc4e6fb 100644 --- a/Standard/src/Oracles/CommonOracles.qs +++ b/Standard/src/AmplitudeAmplification/CommonOracles.qs @@ -20,20 +20,15 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// # Summary /// Implementation of . - operation TargetStateReflectionOracleImpl (phase : Double, idxFlagQubit : Int, qubits : Qubit[]) : Unit { - body (...) { - R1(phase, qubits[idxFlagQubit]); - } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; + operation _TargetStateReflectionOracle(phase : Double, idxFlagQubit : Int, qubits : Qubit[]) + : Unit is Adj + Ctl { + R1(phase, qubits[idxFlagQubit]); } /// # Summary /// Constructs a `ReflectionOracle` about the target state uniquely marked by the flag qubit. - /// - /// The target state has a single qubit set to 1, and all others 0: $\ket{1}_f$. + /// + /// The target state has a single qubit set to 1, and all others 0: $\ket{1}_f$. 
/// /// # Input /// ## idxFlagQubit @@ -44,8 +39,8 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// /// # See Also /// - Microsoft.Quantum.Canon.ReflectionOracle - function TargetStateReflectionOracle (idxFlagQubit : Int) : ReflectionOracle { - return ReflectionOracle(TargetStateReflectionOracleImpl(_, idxFlagQubit, _)); + function TargetStateReflectionOracle(idxFlagQubit : Int) : ReflectionOracle { + return ReflectionOracle(_TargetStateReflectionOracle(_, idxFlagQubit, _)); } } diff --git a/Standard/src/AmplitudeAmplification/Convert.qs b/Standard/src/AmplitudeAmplification/Convert.qs new file mode 100644 index 00000000000..72bb061bbd3 --- /dev/null +++ b/Standard/src/AmplitudeAmplification/Convert.qs @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.AmplitudeAmplification { + open Microsoft.Quantum.Math; + + /// # Summary + /// Converts phases specified as single-qubit rotations to phases + /// specified as partial reflections. + /// + /// # Input + /// ## rotPhases + /// Array of single-qubit rotations to be converted to partial + /// reflections. + /// + /// # Output + /// An operation that implements phases specified as partial reflections. + /// + /// # References + /// We use the convention in + /// - [ *G.H. Low, I. L. Chuang* ](https://arxiv.org/abs/1707.05391) + /// for relating single-qubit rotation phases to reflection operator phases. + function RotationPhasesAsReflectionPhases(rotPhases : RotationPhases) + : ReflectionPhases { + let nPhasesRot = Length(rotPhases!); + let nPhasesRef = (nPhasesRot + 1) / 2; + + if (nPhasesRot % 2 == 0) { + fail $"Number of rotations must be odd."; + } + + mutable phasesTarget = new Double[nPhasesRef]; + mutable phasesStart = new Double[nPhasesRef]; + set phasesTarget w/= 0 <- ((rotPhases!)[0] - (rotPhases!)[1]) - PI(); + set phasesStart w/= 0 <- -(rotPhases!)[0] + 0.5 * PI(); + + for (idxPhases in 1 .. nPhasesRef - 2) { + set phasesTarget w/= idxPhases <- ((rotPhases!)[2 * idxPhases] - (rotPhases!)[2 * idxPhases + 1]) - PI(); + set phasesStart w/= idxPhases <- ((rotPhases!)[2 * idxPhases - 1] - (rotPhases!)[2 * idxPhases]) + PI(); + } + + set phasesTarget w/= nPhasesRef - 1 <- (rotPhases!)[2 * nPhasesRef - 2] - 0.5 * PI(); + set phasesStart w/= nPhasesRef - 1 <- ((rotPhases!)[2 * nPhasesRef - 3] - (rotPhases!)[2 * nPhasesRef - 2]) + PI(); + return ReflectionPhases(phasesStart, phasesTarget); + } + +} diff --git a/Standard/src/AmplitudeAmplification/Deprecated.qs b/Standard/src/AmplitudeAmplification/Deprecated.qs new file mode 100644 index 00000000000..6f0f2158ac4 --- /dev/null +++ b/Standard/src/AmplitudeAmplification/Deprecated.qs @@ -0,0 +1,112 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.AmplitudeAmplification { + open Microsoft.Quantum.Oracles; + + + /// # Deprecated + /// Please use + /// @"microsoft.quantum.amplitudeamplification.rotationphasesasreflectionphases". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.RotationPhasesAsReflectionPhases") + function AmpAmpRotationToReflectionPhases (rotPhases : RotationPhases) + : ReflectionPhases { + return RotationPhasesAsReflectionPhases(rotPhases); + } + + /// # Deprecated + /// Please use + /// @"microsoft.quantum.amplitudeamplification.standardreflectionphases". 
+ @Deprecated("Microsoft.Quantum.AmplitudeAmplification.StandardReflectionPhases") + function AmpAmpPhasesStandard(nIterations : Int) : ReflectionPhases { + return StandardReflectionPhases(nIterations); + } + + /// # Deprecated + /// Please use + /// @"microsoft.quantum.amplitudeamplification.fixedpointreflectionphases". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.FixedPointReflectionPhases") + function AmpAmpPhasesFixedPoint(nQueries : Int, successMin : Double) : ReflectionPhases { + return FixedPointReflectionPhases(nQueries, successMin); + } + + /// # Deprecated + /// Please use @"microsoft.quantum.amplitudeamplification.obliviousamplitudeamplificationfrompartialreflections". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.ObliviousAmplitudeAmplificationFromPartialReflections") + function AmpAmpObliviousByReflectionPhases( + phases : ReflectionPhases, + startStateReflection : ReflectionOracle, + targetStateReflection : ReflectionOracle, + signalOracle : ObliviousOracle + ) + : ((Qubit[], Qubit[]) => Unit is Adj + Ctl) { + return ObliviousAmplitudeAmplificationFromPartialReflections( + phases, startStateReflection, targetStateReflection, signalOracle + ); + } + + /// # Deprecated + /// Please use @"microsoft.quantum.amplitudeamplification.obliviousamplitudeamplificationfromstatepreparation". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.ObliviousAmplitudeAmplificationFromStatePreparation") + function AmpAmpObliviousByOraclePhases( + phases : ReflectionPhases, + startStateOracle : DeterministicStateOracle, + signalOracle : ObliviousOracle, + idxFlagQubit : Int + ) + : ((Qubit[], Qubit[]) => Unit is Adj + Ctl) { + return ObliviousAmplitudeAmplificationFromStatePreparation( + phases, startStateOracle, signalOracle, idxFlagQubit + ); + } + + /// # Deprecated + /// Please use @"microsoft.quantum.amplitudeamplification.amplitudeamplificationfrompartialreflections". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.AmplitudeAmplificationFromPartialReflections") + function AmpAmpByReflectionPhases( + phases : ReflectionPhases, + startStateReflection : ReflectionOracle, + targetStateReflection : ReflectionOracle + ) + : (Qubit[] => Unit is Adj + Ctl) { + return AmplitudeAmplificationFromPartialReflections( + phases, startStateReflection, targetStateReflection + ); + } + + /// # Deprecated + /// Please use @"microsoft.quantum.amplitudeamplification.amplitudeamplificationfromstatepreparation". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.AmplitudeAmplificationFromStatePreparation") + function AmpAmpByOraclePhases( + phases : ReflectionPhases, + stateOracle : StateOracle, + idxFlagQubit : Int + ) + : (Qubit[] => Unit is Adj + Ctl) { + return AmplitudeAmplificationFromStatePreparation( + phases, stateOracle, idxFlagQubit + ); + } + + /// # Deprecated + /// Please use @"microsoft.quantum.amplitudeamplification.standardamplitudeamplification". + @Deprecated("Microsoft.Quantum.AmplitudeAmplification.StandardAmplitudeAmplification") + function AmpAmpByOracle( + nIterations : Int, + stateOracle : StateOracle, + idxFlagQubit : Int + ) + : (Qubit[] => Unit is Adj + Ctl) { + return StandardAmplitudeAmplification(nIterations, stateOracle, idxFlagQubit); + } + + + /// # Deprecated + /// Please use @"microsoft.quantum.amplitudeamplification.applyfixedpointamplification". 
+ @Deprecated("Microsoft.Quantum.AmplitudeAmplification.ApplyFixedPointAmplification") + operation AmpAmpRUSByOracle(statePrepOracle : StateOracle, startQubits : Qubit[]) + : Unit { + ApplyFixedPointAmplification(statePrepOracle, startQubits); + } + +} diff --git a/Standard/src/AmplitudeAmplification/StandardAlgorithms.qs b/Standard/src/AmplitudeAmplification/StandardAlgorithms.qs new file mode 100644 index 00000000000..9bedaeee3d8 --- /dev/null +++ b/Standard/src/AmplitudeAmplification/StandardAlgorithms.qs @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.AmplitudeAmplification { + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Math; + + /// # Summary + /// Computes partial reflection phases for standard amplitude + /// amplification. + /// + /// # Input + /// ## nIterations + /// Number of amplitude amplification iterations to generate partial + /// reflection phases for. + /// + /// # Output + /// An operation that implements phases specified as partial reflections + /// + /// # Remarks + /// All phases are $\pi$, except for the first reflection about the start + /// state and the last reflection about the target state, which are $0$. + function StandardReflectionPhases(nIterations : Int) : ReflectionPhases { + let commonPhases = ConstantArray(nIterations, PI()); + let targetPhases = commonPhases + [0.0]; + let startPhases = [0.0] + commonPhases; + return ReflectionPhases(startPhases, targetPhases); + } + + // We use the phases in "Fixed-Point Amplitude Amplification with an + // Optimal Number of Queires" [YoderLowChuang2014] + // See also "Methodology of composite quantum gates" [LowYoderChuang2016] + // for phases in the `RotationPhases` format + + /// # Summary + /// Computes partial reflection phases for fixed-point amplitude + /// amplification. + /// + /// # Input + /// ## nQueries + /// Number of queries to the state preparation oracle. Must be an odd + /// integer. + /// ## successMin + /// Target minimum success probability. + /// + /// # Output + /// Array of phases that can be used in fixed-point amplitude amplification + /// quantum algorithm implementation. + /// + /// # References + /// We use the phases in "Fixed-Point Amplitude Amplification with + /// an Optimal Number of Queries" + /// - [YoderLowChuang2014](https://arxiv.org/abs/1409.3305) + /// See also "Methodology of composite quantum gates" + /// - [LowYoderChuang2016](https://arxiv.org/abs/1603.03996) + /// for phases in the `RotationPhases` format. + function FixedPointReflectionPhases(nQueries : Int, successMin : Double) + : ReflectionPhases { + let twoPi = 2.0 * PI(); + mutable phasesRot = new Double[nQueries]; + let nQueriesDouble = IntAsDouble(nQueries); + set phasesRot w/= 0 <- 0.0; + let beta = Cosh((1.0 / nQueriesDouble) * ArcCosh(Sqrt(successMin))); + let alpha = Sqrt(1.0 - beta * beta); + + for (idxPhases in 1 .. 
nQueries - 1) { + set phasesRot w/= idxPhases <- + phasesRot[idxPhases - 1] + + 2.0 * ArcTan( + Tan(twoPi * IntAsDouble(idxPhases) / nQueriesDouble) * alpha + ); + } + + return RotationPhasesAsReflectionPhases(RotationPhases(phasesRot)); + } + +} diff --git a/Standard/src/AmplitudeAmplification/Types.qs b/Standard/src/AmplitudeAmplification/Types.qs index 57f3fa2d007..5addb42ddfa 100644 --- a/Standard/src/AmplitudeAmplification/Types.qs +++ b/Standard/src/AmplitudeAmplification/Types.qs @@ -7,9 +7,14 @@ namespace Microsoft.Quantum.AmplitudeAmplification { /// Phases for a sequence of partial reflections in amplitude amplification. /// /// # Remarks - /// The first parameter is an array of phases for reflection about the start state. The second parameter is an array of phases for reflection about the target state. + /// The first parameter is an array of phases for reflection about the + /// start state. The second parameter is an array of phases for reflection + /// about the target state. /// Both arrays must be of equal length. Note that in many cases, the first phase about the start state and last phase about the target state introduces a global phase shift and may be set to $0$. - newtype ReflectionPhases = (Double[], Double[]); + newtype ReflectionPhases = ( + AboutStart: Double[], + AboutTarget: Double[] + ); /// # Summary /// Phases for a sequence of single-qubit rotations in amplitude amplification. diff --git a/Standard/src/Arithmetic/Increment.qs b/Standard/src/Arithmetic/Increment.qs index f6dd2ae2ec6..53066a5b388 100644 --- a/Standard/src/Arithmetic/Increment.qs +++ b/Standard/src/Arithmetic/Increment.qs @@ -70,9 +70,6 @@ namespace Microsoft.Quantum.Arithmetic { /// encoding. /// ## increment /// The integer by which the `target` is incremented by. - /// - /// # See Also - /// - IncrementByIntegerPhaseLE operation IncrementByInteger(increment : Int, target : LittleEndian) : Unit is Adj + Ctl { ApplyPhaseLEOperationOnLECA(IncrementPhaseByInteger(increment, _), target); } diff --git a/Standard/src/Arithmetic/Modular.qs b/Standard/src/Arithmetic/Modular.qs index a2650934558..2f9bea6c89d 100644 --- a/Standard/src/Arithmetic/Modular.qs +++ b/Standard/src/Arithmetic/Modular.qs @@ -11,28 +11,31 @@ namespace Microsoft.Quantum.Arithmetic { /// # Summary /// Performs a modular increment of a qubit register by an integer constant. /// - /// Let us denote `increment` by a, `modulus` by N and integer encoded in `target` by y + /// # Description + /// Let us denote `increment` by $a$, `modulus` by $N$ and integer encoded in `target` by $y$. /// Then the operation performs the following transformation: /// \begin{align} - /// \ket{y} \mapsto \ket{y + 1 \operatorname{mod} N} + /// \ket{y} \mapsto \ket{(y + a) \operatorname{mod} N} /// \end{align} /// Integers are encoded in little-endian format. /// /// # Input /// ## increment - /// Integer increment a to be added to y. + /// Integer increment $a$ to be added to $y$. /// ## modulus - /// Integer N that mods y + a. + /// Integer $N$ that mods $y + a$. /// ## target - /// Integer y in `LittleEndian` format that `increment` a is added to. + /// Integer $y$ in `LittleEndian` format that `increment` $a$ is added to. /// /// # See Also - /// - IncrementPhaseByModularInteger + /// - Microsoft.Quantum.Arithmetic.IncrementPhaseByModularInteger /// /// # Remarks - /// Assumes that the value of target is less than N. Note that + /// Assumes that the initial value of target is less than $N$ + /// and that the increment $a$ is less than $N$. 
+ /// Note that /// implements - /// the same operation, but in the `PhaseLittleEndian` basis. + /// the same operation in the `PhaseLittleEndian` basis. operation IncrementByModularInteger(increment : Int, modulus : Int, target : LittleEndian) : Unit { body (...) { let inner = IncrementPhaseByModularInteger(increment, modulus, _); @@ -50,17 +53,20 @@ namespace Microsoft.Quantum.Arithmetic { /// # Summary /// Performs a modular increment of a qubit register by an integer constant. /// - /// Let us denote `increment` by a, `modulus` by N and integer encoded in `target` by y + /// # Description + /// Let us denote `increment` by $a$, `modulus` by $N$ and integer encoded in `target` by $y$. /// Then the operation performs the following transformation: - /// |y⟩ ↦ |y+a (mod N)⟩ - /// Integers are encoded in little-endian format in QFT basis + /// \begin{align} + /// \ket{y} \mapsto \ket{(y + a) \operatorname{mod} N} + /// \end{align} + /// Integers are encoded in little-endian format in QFT basis. /// /// # See Also - /// - Microsoft.Quantum.Canon.ModularIncrementLE + /// - Microsoft.Quantum.Arithmetic.IncrementByModularInteger /// /// # Remarks /// Assumes that `target` has the highest bit set to 0. - /// Also assumes that the value of target is less than N. + /// Also assumes that the value of target is less than $N$. /// /// For the circuit diagram and explanation see Figure 5 on [Page 5 /// of arXiv:quant-ph/0205095v3](https://arxiv.org/pdf/quant-ph/0205095v3.pdf#page=5). @@ -113,10 +119,11 @@ namespace Microsoft.Quantum.Arithmetic { /// # Summary /// Performs a modular multiply-and-add by integer constants on a qubit register. /// + /// # Description /// Implements the map /// $$ /// \begin{align} - /// \ket{x} \ket{b} \mapsto \ket{x} \ket{b + a \cdot x \operatorname{mod} N} + /// \ket{x} \ket{b} \mapsto \ket{x} \ket{(b + a \cdot x) \operatorname{mod} N} /// \end{align} /// $$ /// for a given modulus $N$, constant multiplier $a$, and summand $y$. @@ -133,6 +140,9 @@ namespace Microsoft.Quantum.Arithmetic { /// A quantum register representing an unsigned integer to use as the target /// for this operation. /// + /// # See Also + /// - Microsoft.Quantum.Arithmetic.MultiplyAndAddPhaseByModularInteger + /// /// # Remarks /// - For the circuit diagram and explanation see Figure 6 on [Page 7 /// of arXiv:quant-ph/0205095v3](https://arxiv.org/pdf/quant-ph/0205095v3.pdf#page=7) @@ -153,15 +163,15 @@ namespace Microsoft.Quantum.Arithmetic { } /// # Summary - /// The same as ModularAddProductLE, but assumes that summand encodes - /// integers in QFT basis - /// - /// # See Also - /// - Microsoft.Quantum.Canon.ModularAddProductLE + /// The same as MultiplyAndAddByModularInteger, but assumes that the summand encodes + /// integers in QFT basis. /// /// # Remarks /// Assumes that `phaseSummand` has the highest bit set to 0. - /// Also assumes that the value of `phaseSummand` is less than N. + /// Also assumes that the value of `phaseSummand` is less than $N$. + /// + /// # See Also + /// - Microsoft.Quantum.Arithmetic.MultiplyAndAddByModularInteger operation MultiplyAndAddPhaseByModularInteger(constMultiplier : Int, modulus : Int, multiplier : LittleEndian, phaseSummand : PhaseLittleEndian) : Unit is Adj + Ctl { EqualityFactB(modulus <= 2 ^ (Length(phaseSummand!) 
- 1), true, $"`multiplier` must be big enough to fit integers modulo `modulus`" + $"with highest bit set to 0"); EqualityFactB(constMultiplier >= 0 and constMultiplier < modulus, true, $"`constMultiplier` must be between 0 and `modulus`-1"); @@ -183,19 +193,25 @@ namespace Microsoft.Quantum.Arithmetic { /// # Summary /// Performs modular multiplication by an integer constant on a qubit register. /// - /// Let us denote modulus by N and constMultiplier by a - /// then this operation implements a unitary defined by the following map on + /// # Description + /// Let us denote `modulus` by $N$ and `constMultiplier` by $a$. + /// Then this operation implements a unitary operation defined by the following map on the /// computational basis: - /// |y⟩ ↦ |a⋅y (mod N) ⟩, for all y between 0 and N - 1 + /// $$ + /// \begin{align} + /// \ket{y} \mapsto \ket{(a \cdot y) \operatorname{mod} N} + /// \end{align} + /// $$ + /// for all $y$ between $0$ and $N - 1$. /// /// # Input /// ## constMultiplier /// Constant by which multiplier is being multiplied. Must be co-prime to modulus. /// ## modulus - /// The multiplication operation is performed modulo `modulus` + /// The multiplication operation is performed modulo `modulus`. /// ## multiplier /// The number being multiplied by a constant. - /// This is an array of qubits representing integer in little-endian bit order. + /// This is an array of qubits encoding an integer in little-endian format. /// /// # Remarks /// - For the circuit diagram and explanation see Figure 7 on [Page 8 @@ -204,7 +220,7 @@ namespace Microsoft.Quantum.Arithmetic { /// [arXiv:quant-ph/0205095v3](https://arxiv.org/pdf/quant-ph/0205095v3.pdf) operation MultiplyByModularInteger(constMultiplier : Int, modulus : Int, multiplier : LittleEndian) : Unit is Adj + Ctl { // Check the preconditions using Microsoft.Quantum.Canon.EqualityFactB - EqualityFactB(constMultiplier >= 0 and constMultiplier < modulus, true, $"`constMultiplier` must be between 0 and `modulus`"); + EqualityFactB(0 <= constMultiplier and constMultiplier < modulus, true, $"`constMultiplier` must be between 0 and `modulus`"); EqualityFactB(modulus <= 2 ^ Length(multiplier!), true, $"`multiplier` must be big enough to fit integers modulo `modulus`"); EqualityFactB(IsCoprimeI(constMultiplier, modulus), true, $"`constMultiplier` and `modulus` must be co-prime"); diff --git a/Standard/src/Canon/And.qs b/Standard/src/Canon/And.qs new file mode 100644 index 00000000000..c1c3eec25d2 --- /dev/null +++ b/Standard/src/Canon/And.qs @@ -0,0 +1,367 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Canon { + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Measurement; + + /// # Summary + /// Inverts a given target qubit if and only if both control qubits are in the 1 state, + /// using measurement to perform the adjoint operation. + /// + /// # Description + /// Inverts `target` if and only if both controls are 1, but assumes that + /// `target` is in state 0. The operation has T-count 4, T-depth 2 and + /// requires no helper qubit, and may therefore be preferable to a CCNOT + /// operation, if `target` is known to be 0. The adjoint of this operation + /// is measurement based and requires no T gates. 
+ /// + /// The controlled application of this operation requires no helper qubit, + /// `2^c` `Rz` gates and is not optimized for depth, where `c` is the number + /// of overall control qubits including the two controls from the `ApplyAnd` + /// operation. The adjoint controlled application requires `2^c - 1` `Rz` + /// gates (with an angle twice the size of the non-adjoint operation), no + /// helper qubit and is not optimized for depth. + /// + /// # Input + /// ## control1 + /// First control qubit + /// ## control2 + /// Second control qubit + /// ## target + /// Target auxiliary qubit; must be in state 0 + /// + /// # References + /// - Cody Jones: "Novel constructions for the fault-tolerant Toffoli gate", + /// Phys. Rev. A 87, 022328, 2013 + /// [arXiv:1212.5069](https://arxiv.org/abs/1212.5069) + /// doi:10.1103/PhysRevA.87.022328 + /// - Craig Gidney: "Halving the cost of quantum addition", Quantum 2, page + /// 74, 2018 + /// [arXiv:1709.06648](https://arxiv.org/abs/1709.06648) + /// doi:10.1103/PhysRevA.85.044302 + /// - Mathias Soeken: "Quantum Oracle Circuits and the Christmas Tree Pattern", + /// [Blog article from December 19, 2019](https://msoeken.github.io/blog_qac.html) + /// (note: explains the multiple controlled construction) + operation ApplyAnd(control1 : Qubit, control2 : Qubit, target : Qubit) : Unit { + body (...) { + AssertAllZero([target]); + H(target); + T(target); + CNOT(control1, target); + CNOT(control2, target); + within { + CNOT(target, control1); + CNOT(target, control2); + } + apply { + Adjoint T(control1); + Adjoint T(control2); + T(target); + } + HY(target); + } + adjoint (...) { + H(target); + AssertProb([PauliZ], [target], One, 0.5, "Probability of the measurement must be 0.5", 1e-10); + if (IsResultOne(MResetZ(target))) { + CZ(control1, control2); + } + } + controlled (controls, ...) { + _ApplyMultipleControlledAnd(controls + [control1, control2], target); + } + adjoint controlled (controls, ...) { + Adjoint _ApplyMultipleControlledAnd(controls + [control1, control2], target); + } + } + + /// # Summary + /// Inverts a given target qubit if and only if both control qubits are in + /// the 1 state, with T-depth 1, using measurement to perform the adjoint + /// operation. + /// + /// # Description + /// Inverts `target` if and only if both controls are 1, but assumes that + /// `target` is in state 0. The operation has T-count 4, T-depth 1 and + /// requires one helper qubit, and may therefore be preferable to a CCNOT + /// operation, if `target` is known to be 0. The adjoint of this operation + /// is measurement based and requires no T gates, and no helper qubit. + /// + /// # Input + /// ## control1 + /// First control qubit + /// ## control2 + /// Second control qubit + /// ## target + /// Target auxiliary qubit; must be in state 0 + /// + /// # References + /// - Cody Jones: "Novel constructions for the fault-tolerant Toffoli gate", + /// Phys. Rev. A 87, 022328, 2013 + /// [arXiv:1212.5069](https://arxiv.org/abs/1212.5069) + /// doi:10.1103/PhysRevA.87.022328 + /// - Peter Selinger: "Quantum circuits of T-depth one", + /// Phys. Rev. A 87, 042302, 2013 + /// [arXiv:1210.0974](https://arxiv.org/abs/1210.0974) + /// doi:10.1103/PhysRevA.87.042302 + operation ApplyLowDepthAnd(control1 : Qubit, control2 : Qubit, target : Qubit) : Unit { + body (...)
{ + using (helper = Qubit()) { + AssertAllZero([target]); + H(target); + within { + CNOT(target, control1); + CNOT(control1, helper); + CNOT(control2, helper); + CNOT(target, control2); + } + apply { + Adjoint T(control1); + Adjoint T(control2); + T(target); + T(helper); + } + HY(target); + } + } + adjoint (...) { + Adjoint ApplyAnd(control1, control2, target); + } + controlled (controls, ...) { + _ApplyMultipleControlledLowDepthAnd(controls + [control1, control2], target); + } + adjoint controlled (controls, ...) { + Adjoint _ApplyMultipleControlledLowDepthAnd(controls + [control1, control2], target); + } + } + + /// # Summary + /// Creates Gray code sequences + /// + /// # Input + /// ## n + /// Number of bits + /// + /// # Output + /// Array of tuples. First value in tuple is value in GrayCode sequence + /// Second value in tuple is position to change in current value to get + /// next one. + /// + /// # Example + /// ```Q# + /// _GrayCode(2); // [(0, 0),(1, 1),(3, 0),(2, 1)] + /// ``` + function _GrayCode(n : Int) : (Int, Int)[] { + let N = 1 <<< n; + + mutable res = new (Int, Int)[N]; + mutable j = 0; + mutable current = IntAsBoolArray(0, n); + + for (i in 0..N - 1) { + if (i % 2 == 0) { + set j = 0; + } else { + let e = Zip(current, RangeAsIntArray(0..N - 1)); + set j = Snd(Head(Filtered(Fst, e))) + 1; + } + + set j = MaxI(0, Min([j, n - 1])); + set res w/= i <- (BoolArrayAsInt(current), j); + if (j < n) { + set current w/= j <- not current[j]; + } + } + + return res; + } + + /// # Summary + /// Computes the Hamming weight of an integer, i.e., the number of 1s in its + /// binary expansion. + /// + /// # Input + /// ## number + /// Number to compute Hamming weight + /// # Output + /// Hamming weight of the number + function _HammingWeightI(number : Int) : Int { + mutable cnt = number; + set cnt = (cnt &&& 0x5555555555555555) + ((cnt >>> 1) &&& 0x5555555555555555); + set cnt = (cnt &&& 0x3333333333333333) + ((cnt >>> 2) &&& 0x3333333333333333); + set cnt = (cnt &&& 0x0f0f0f0f0f0f0f0f) + ((cnt >>> 4) &&& 0x0f0f0f0f0f0f0f0f); + set cnt = (cnt &&& 0x00ff00ff00ff00ff) + ((cnt >>> 8) &&& 0x00ff00ff00ff00ff); + set cnt = (cnt &&& 0x0000ffff0000ffff) + ((cnt >>> 16) &&& 0x0000ffff0000ffff); + set cnt = (cnt &&& 0x00000000ffffffff) + ((cnt >>> 32) &&& 0x00000000ffffffff); + return cnt; + } + + /// # Summary + /// Returns 1, if `index` has an odd number of 1s and -1, if `index` has an + /// even number of 1s. + /// + /// # Description + /// Value corresponds to the sign of the coefficient of the Rademacher-Walsh + /// spectrum of the n-variable AND function for a given assignment that + /// decides the angle of the rotation. + /// + /// # Input + /// ## index + /// Input assignment as integer (from 0 to 2^n - 1) + function _Angle(index : Int) : Int { + return _HammingWeightI(index) % 2 == 1 ? 1 | -1; + } + + /// # Summary + /// Implements a multiple-controlled Toffoli gate, assuming that target + /// qubit is initialized 0. The adjoint operation assumes that the target + /// qubit will be released to 0. + /// + /// # Input + /// ## controls + /// Control qubits + /// ## target + /// Target qubit + operation _ApplyMultipleControlledAnd(controls : Qubit[], target : Qubit) : Unit { + body (...) { + let vars = Length(controls); + + AssertAllZero([target]); + + H(target); + + let code = _GrayCode(vars); + for (j in 0..Length(code) - 1) { + let (offset, ctrl) = code[j]; + RFrac(PauliZ, _Angle(offset), vars + 1, target); + CNOT(controls[ctrl], target); + } + + HY(target); + } + adjoint (...) 
{ + let vars = Length(controls); + + H(target); + AssertProb([PauliZ], [target], One, 0.5, "Probability of the measurement must be 0.5", 1e-10); + if (IsResultOne(MResetZ(target))) { + for (i in 0..vars - 1) { + let start = 1 <<< i; + let code = _GrayCode(i); + for (j in 0..Length(code) - 1) { + let (offset, ctrl) = code[j]; + RFrac(PauliZ, -_Angle(start + offset), vars, controls[i]); + if (i != 0) { + CNOT(controls[ctrl], controls[i]); + } + } + } + } + } + } + + /// # Summary + /// Arrange control, target, and helper qubits according to an index + /// + /// # Description + /// Returns a Qubit array with target at index 0, and control i at index + /// 2^i. The helper qubits are inserted to all other positions in the + /// array. + function _ArrangeQubits(controls : Qubit[], target : Qubit, helper : Qubit[]) : Qubit[] { + let numControls = Length(controls); + mutable qs = new Qubit[2^numControls] w/ 0 <- target; + mutable cntC = 0; + mutable cntH = 0; + for (i in 1..2^numControls - 1) { + if (i == (i &&& -i)) { + set qs w/= i <- controls[cntC]; + set cntC += 1; + } else { + set qs w/= i <- helper[cntH]; + set cntH += 1; + } + } + return qs; + } + + /// # Summary + /// Implements a multiple-controlled Toffoli gate, assuming that target + /// qubit is initialized 0. The adjoint operation assumes that the target + /// qubit will be released to 0. Requires a Rz depth of 1, while the number + /// of helper qubits are exponential in the number of qubits. + /// + /// # Input + /// ## controls + /// Control qubits + /// ## target + /// Target qubit + operation _ApplyMultipleControlledLowDepthAnd(controls : Qubit[], target : Qubit) : Unit { + body (...) { + let vars = Length(controls); + using (helper = Qubit[2^vars - vars - 1]) { + let qs = _ArrangeQubits(controls, target, helper); + + AssertAllZero([target]); + H(target); + + within { + // initialize helper lines with control lines based on LSB + for (i in 3..2^vars - 1) { + let lsb = i &&& -i; + if (i != lsb) { // i is power of 2 + CNOT(qs[lsb], qs[i]); + } + } + // target to control + ApplyToEachA(CNOT(target, _), controls); + // copy remainder (without LSB) + for (i in 3..2^vars - 1) { + let lsb = i &&& -i; + if (i != lsb) { + CNOT(qs[i - lsb], qs[i]); + } + } + } apply { + for (i in IndexRange(qs)) { + RFrac(PauliZ, _Angle(i), vars + 1, qs[i]); + } + } + + HY(target); + } + } + adjoint (...) { + let vars = Length(controls); + + H(target); + AssertProb([PauliZ], [target], One, 0.5, "Probability of the measurement must be 0.5", 1e-10); + if (IsResultOne(MResetZ(target))) { + using (helper = Qubit[2^vars - vars - 1]) { + let qs = _ArrangeQubits(controls, target, helper); + within { + // this is a bit easier than in the compute part, since + // the target qubit does not have to be copied over to + // the control lines. Therefore, the two LSB CNOT parts + // can be merged into a single loop. + for (i in 3..2^vars - 1) { + let lsb = i &&& -i; + if (i != lsb) { + CNOT(qs[lsb], qs[i]); + CNOT(qs[i - lsb], qs[i]); + } + } + } apply { + for (i in 1..2^vars - 1) { + RFrac(PauliZ, -_Angle(i), vars, qs[i]); + } + } + } + } + } + } +} diff --git a/Standard/src/Canon/Combinators/ApplyMultiControlled.qs b/Standard/src/Canon/Combinators/ApplyMultiControlled.qs index 89bddccafab..ab207594dff 100644 --- a/Standard/src/Canon/Combinators/ApplyMultiControlled.qs +++ b/Standard/src/Canon/Combinators/ApplyMultiControlled.qs @@ -11,7 +11,7 @@ namespace Microsoft.Quantum.Canon { /// # Summary /// The signature type of CCNOT gate. 
- newtype CCNOTop = ((Qubit, Qubit, Qubit) => Unit is Adj); + newtype CCNOTop = (Apply : ((Qubit, Qubit, Qubit) => Unit is Adj)); /// # Summary @@ -170,10 +170,10 @@ namespace Microsoft.Quantum.Canon { operation AndLadder (ccnot : CCNOTop, controls : Qubit[], targets : Qubit[]) : Unit is Adj { EqualityFactI(Length(controls), Length(targets) + 1, $"Length(controls) must be equal to Length(target) + 1"); Fact(Length(controls) >= 2, $"The operation is not defined for less than 2 controls"); - ccnot!(controls[0], controls[1], targets[0]); + ccnot::Apply(controls[0], controls[1], targets[0]); for (k in 1 .. Length(targets) - 1) { - ccnot!(controls[k + 1], targets[k - 1], targets[k]); + ccnot::Apply(controls[k + 1], targets[k - 1], targets[k]); } } diff --git a/Standard/src/Characterization/Distinguishability.qs b/Standard/src/Characterization/Distinguishability.qs new file mode 100644 index 00000000000..483cdbed803 --- /dev/null +++ b/Standard/src/Characterization/Distinguishability.qs @@ -0,0 +1,234 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Characterization { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arrays; + + /// # Summary + /// Given two operations which each prepare copies of a state, estimates + /// the real part of the overlap between the states prepared by each + /// operation. + /// + /// # Input + /// ## commonPreparation + /// An operation that prepares a fixed input state. + /// ## preparation1 + /// The first of the two state preparation operations to be compared. + /// ## preparation2 + /// The second of the two state preparation operations to be compared. + /// ## nQubits + /// The number of qubits on which `commonPreparation`, `preparation1`, and + /// `preparation2` all act. + /// ## nMeasurements + /// The number of measurements to use in estimating the overlap. + /// + /// # Remarks + /// This operation uses the Hadamard test to find the real part of + /// $$ + /// \begin{align} + /// \braket{\psi | V^{\dagger} U | \psi} + /// \end{align} + /// $$ + /// where $\ket{\psi}$ is the state prepared by `commonPreparation`, + /// $U$ is the unitary representation of the action of `preparation1`, + /// and where $V$ corresponds to `preparation2`. + /// + /// # References + /// - Aharonov *et al.* [quant-ph/0511096](https://arxiv.org/abs/quant-ph/0511096). + /// + /// # See Also + /// - Microsoft.Quantum.Characterization.EstimateImagOverlapBetweenStates + /// - Microsoft.Quantum.Characterization.EstimateOverlapBetweenStates + operation EstimateRealOverlapBetweenStates( + commonPreparation : (Qubit[] => Unit is Adj), + preparation1 : (Qubit[] => Unit is Adj + Ctl), + preparation2 : (Qubit[] => Unit is Adj + Ctl), + nQubits : Int, nMeasurements : Int + ) + : Double { + return 2.0 * EstimateFrequencyA( + _ApplyHadamardTestOnSingleRegister(false, commonPreparation, preparation1, preparation2, _), + _HeadMeasurement(nQubits + 1), + nQubits + 1, nMeasurements + ) - 1.0; + } + + /// # Summary + /// Given two operations which each prepare copies of a state, estimates + /// the imaginary part of the overlap between the states prepared by each + /// operation. + /// + /// # Input + /// ## commonPreparation + /// An operation that prepares a fixed input state. + /// ## preparation1 + /// The first of the two state preparation operations to be compared. + /// ## preparation2 + /// The second of the two state preparation operations to be compared. 
+ /// ## nQubits + /// The number of qubits on which `commonPreparation`, `preparation1`, and + /// `preparation2` all act. + /// ## nMeasurements + /// The number of measurements to use in estimating the overlap. + /// + /// # Remarks + /// This operation uses the Hadamard test to find the imaginary part of + /// $$ + /// \begin{align} + /// \braket{\psi | V^{\dagger} U | \psi} + /// \end{align} + /// $$ + /// where $\ket{\psi}$ is the state prepared by `commonPreparation`, + /// $U$ is the unitary representation of the action of `preparation1`, + /// and where $V$ corresponds to `preparation2`. + /// + /// # References + /// - Aharonov *et al.* [quant-ph/0511096](https://arxiv.org/abs/quant-ph/0511096). + /// + /// # See Also + /// - Microsoft.Quantum.Characterization.EstimateRealOverlapBetweenStates + /// - Microsoft.Quantum.Characterization.EstimateOverlapBetweenStates + operation EstimateImagOverlapBetweenStates( + commonPreparation : (Qubit[] => Unit is Adj), + preparation1 : (Qubit[] => Unit is Adj + Ctl), + preparation2 : (Qubit[] => Unit is Adj + Ctl), + nQubits : Int, nMeasurements : Int + ) + : Double { + return 2.0 * EstimateFrequencyA( + _ApplyHadamardTestOnSingleRegister(true, commonPreparation, preparation1, preparation2, _), + _HeadMeasurement(nQubits + 1), + nQubits + 1, nMeasurements + ) - 1.0; + } + + + /// # Summary + /// Given two operations which each prepare copies of a state, estimates + /// the squared overlap between the states prepared by each + /// operation. + /// + /// # Input + /// ## preparation1 + /// The first of the two state preparation operations to be compared. + /// ## preparation2 + /// The second of the two state preparation operations to be compared. + /// ## nQubits + /// The number of qubits on which `commonPreparation`, `preparation1`, and + /// `preparation2` all act. + /// ## nMeasurements + /// The number of measurements to use in estimating the overlap. + /// + /// # Remarks + /// This operation uses the SWAP test to find + /// $$ + /// \begin{align} + /// \left| \braket{00\cdots 0 | V^{\dagger} U | 00\cdots 0} \right|^2 + /// \end{align} + /// $$ + /// where $U$ is the unitary representation of the action of `preparation1`, + /// and where $V$ corresponds to `preparation2`. + /// + /// # See Also + /// - Microsoft.Quantum.Characterization.EstimateRealOverlapBetweenStates + /// - Microsoft.Quantum.Characterization.EstimateImagOverlapBetweenStates + operation EstimateOverlapBetweenStates( + preparation1 : (Qubit[] => Unit is Adj), + preparation2 : (Qubit[] => Unit is Adj), + nQubits : Int, nMeasurements : Int + ) + : Double { + let nTotalQubits = 2 * nQubits + 1; + return 2.0 * EstimateFrequencyA( + _ApplySwapTestOnSingleRegister(preparation1, preparation2, _), + _HeadMeasurement(nTotalQubits), + nTotalQubits, nMeasurements + ) - 1.0; + } + + + operation _ApplyHadamardTest( + phaseShift : Bool, + commonPreparation : (Qubit[] => Unit is Adj), + preparation1 : (Qubit[] => Unit is Adj + Ctl), + preparation2 : (Qubit[] => Unit is Adj + Ctl), + control : Qubit, + target : Qubit[] + ) + : Unit is Adj + { + within { + H(control); + } apply { + commonPreparation(target); + Controlled preparation1([control], target); + within { X(control); } + apply { Controlled preparation2([control], target); } + + (phaseShift ? 
S | I)(control); + } + } + + operation _ApplyHadamardTestOnSingleRegister( + phaseShift : Bool, + commonPreparation : (Qubit[] => Unit is Adj), + preparation1 : (Qubit[] => Unit is Adj + Ctl), + preparation2 : (Qubit[] => Unit is Adj + Ctl), + register : Qubit[] + ) + : Unit is Adj + { + let control = Head(register); + let target = Rest(register); + _ApplyHadamardTest( + phaseShift, + commonPreparation, + preparation1, preparation2, + control, target + ); + } + + + operation _ApplySwapTest( + preparation1 : (Qubit[] => Unit is Adj), + preparation2 : (Qubit[] => Unit is Adj), + control : Qubit, + target1 : Qubit[], + target2 : Qubit[] + ) + : Unit is Adj { + within { + H(control); + } apply { + preparation1(target1); + preparation2(target2); + ApplyToEachCA(Controlled SWAP([control], _), Zip(target1, target2)); + } + } + + operation _ApplySwapTestOnSingleRegister( + preparation1 : (Qubit[] => Unit is Adj), + preparation2 : (Qubit[] => Unit is Adj), + register : Qubit[] + ) + : Unit is Adj { + let control = Head(register); + let targets = Rest(register); + _ApplySwapTest( + preparation1, preparation2, + control, + targets[...Length(targets) / 2 - 1], + targets[Length(targets) / 2...] + ); + } + + function _HeadMeasurement(nQubits : Int) : (Qubit[] => Result) { + return Measure( + ConstantArray(nQubits, PauliI) w/ 0 <- PauliZ, + _ + ); + } + +} diff --git a/Standard/src/Oracles/Convert.qs b/Standard/src/Oracles/Convert.qs index 0f9650c653a..3b804227e36 100644 --- a/Standard/src/Oracles/Convert.qs +++ b/Standard/src/Oracles/Convert.qs @@ -6,20 +6,13 @@ namespace Microsoft.Quantum.Oracles { /// # Summary /// Implementation of . - operation _ObliviousOracleFromDeterministicStateOracle (ancillaOracle : DeterministicStateOracle, signalOracle : ObliviousOracle, ancillaRegister : Qubit[], systemRegister : Qubit[]) : Unit - { - body (...) - { - ancillaOracle!(ancillaRegister); - signalOracle!(ancillaRegister, systemRegister); - } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; + operation _ObliviousOracleFromDeterministicStateOracle(ancillaOracle : DeterministicStateOracle, signalOracle : ObliviousOracle, ancillaRegister : Qubit[], systemRegister : Qubit[]) + : Unit is Adj + Ctl { + ancillaOracle!(ancillaRegister); + signalOracle!(ancillaRegister, systemRegister); } - - + + /// # Summary /// Combines the oracles `DeterministicStateOracle` and `ObliviousOracle`. /// @@ -35,27 +28,19 @@ namespace Microsoft.Quantum.Oracles { /// # See Also /// - Microsoft.Quantum.Canon.DeterministicStateOracle /// - Microsoft.Quantum.Canon.ObliviousOracle - function ObliviousOracleFromDeterministicStateOracle (ancillaOracle : DeterministicStateOracle, signalOracle : ObliviousOracle) : ObliviousOracle - { + function ObliviousOracleFromDeterministicStateOracle (ancillaOracle : DeterministicStateOracle, signalOracle : ObliviousOracle) : ObliviousOracle { return ObliviousOracle(_ObliviousOracleFromDeterministicStateOracle(ancillaOracle, signalOracle, _, _)); } - - + + /// # Summary /// Implementation of . - operation _DeterministicStateOracleFromStateOracle (idxFlagQubit : Int, stateOracle : StateOracle, startQubits : Qubit[]) : Unit - { - body (...) 
- { - stateOracle!(idxFlagQubit, startQubits); - } - - adjoint invert; - controlled distribute; - controlled adjoint distribute; + operation _DeterministicStateOracleFromStateOracle (idxFlagQubit : Int, stateOracle : StateOracle, startQubits : Qubit[]) + : Unit is Adj + Ctl { + stateOracle!(idxFlagQubit, startQubits); } - - + + /// # Summary /// Converts an oracle of type `StateOracle` to `DeterministicStateOracle`. /// @@ -79,8 +64,8 @@ namespace Microsoft.Quantum.Oracles { { return DeterministicStateOracle(_DeterministicStateOracleFromStateOracle(idxFlagQubit, stateOracle, _)); } - - + + /// # Summary /// Implementation of . operation _StateOracleFromDeterministicStateOracle (idxFlagQubit : Int, oracleStateDeterministic : DeterministicStateOracle, qubits : Qubit[]) : Unit @@ -89,13 +74,13 @@ namespace Microsoft.Quantum.Oracles { { oracleStateDeterministic!(qubits); } - + adjoint invert; controlled distribute; controlled adjoint distribute; } - - + + /// # Summary /// Converts an oracle of type `DeterministicStateOracle` to `StateOracle`. /// @@ -115,8 +100,8 @@ namespace Microsoft.Quantum.Oracles { { return StateOracle(_StateOracleFromDeterministicStateOracle(_, deterministicStateOracle, _)); } - - + + /// # Summary /// Implementation of . operation ReflectionOracleFromDeterministicStateOracleImpl (phase : Double, oracle : DeterministicStateOracle, systemRegister : Qubit[]) : Unit @@ -125,20 +110,20 @@ namespace Microsoft.Quantum.Oracles { { ApplyWithCA(Adjoint oracle!, RAll0(phase, _), systemRegister); } - + adjoint invert; controlled distribute; controlled adjoint distribute; } - - + + /// # Summary /// Constructs reflection about a given state from an oracle. - /// - /// Given the oracle $O$ of type + /// + /// Given the oracle $O$ of type /// , - /// the result of this function is a reflection around the state $\ket{\psi}$ - /// where $O\ket{0} = \ket{\psi}$. + /// the result of this function is a reflection around the state $\ket{\psi}$ + /// where $O\ket{0} = \ket{\psi}$. /// /// # Input /// ## oracle diff --git a/Standard/src/Oracles/Types.qs b/Standard/src/Oracles/Types.qs index a3c395ffd95..0350cd20125 100644 --- a/Standard/src/Oracles/Types.qs +++ b/Standard/src/Oracles/Types.qs @@ -14,7 +14,9 @@ namespace Microsoft.Quantum.Oracles { /// This oracle $O = \boldone - (1 - e^{i \phi}) \ket{\psi}\bra{\psi}$ /// performs a partial reflection by a phase $\phi$ about a single pure state /// $\ket{\psi}$. - newtype ReflectionOracle = ((Double, Qubit[]) => Unit is Adj + Ctl); + newtype ReflectionOracle = ( + ApplyReflection: ((Double, Qubit[]) => Unit is Adj + Ctl) + ); // This oracle O|s>_a|ψ>_s = λ |t>_a U |ψ>_s + ... acts on the ancilla state |s>_a to implement the unitary U on any system state |ψ>_s with amplitude λ in the |t>_a basis. @@ -28,7 +30,7 @@ namespace Microsoft.Quantum.Oracles { /// # Remarks /// This oracle defined by /// $$ - ///O\ket{s}\_a\ket{\psi}\_s= \lambda\ket{t}\_a U \ket{\psi}\_s + \sqrt{1-|\lambda|^2}\ket{t^\perp}\_a\cdots + /// O\ket{s}\_a\ket{\psi}\_s= \lambda\ket{t}\_a U \ket{\psi}\_s + \sqrt{1-|\lambda|^2}\ket{t^\perp}\_a\cdots /// $$ /// acts on the ancilla state $\ket{s}\_a$ to implement the unitary $U$ on any system state $\ket{\psi}\_s$ with amplitude $\lambda$ in the basis flagged by $\ket{t}\_a$. /// The first parameter is the qubit register of $\ket{s}\_a$. The second parameter is the qubit register of $\ket{\psi}\_s$. @@ -64,15 +66,15 @@ namespace Microsoft.Quantum.Oracles { /// # Summary /// Represents a discrete-time oracle. 
- /// - /// This is an oracle that implements $U^m$ for a fixed operation $U$ + /// + /// This is an oracle that implements $U^m$ for a fixed operation $U$ /// and a non-negative integer $m$. newtype DiscreteOracle = ((Int, Qubit[]) => Unit is Adj + Ctl); /// # Summary /// Represents a continuous-time oracle. - /// - /// This is an oracle that implements + /// + /// This is an oracle that implements /// $U(\delta t) : \ket{\psi(t)} \mapsto \ket{\psi(t + \delta t)}$ /// for all times $t$, where $U$ is a fixed operation, and where /// $\delta t$ is a non-negative real number. diff --git a/Standard/src/Preparation/UniformSuperposition.qs b/Standard/src/Preparation/UniformSuperposition.qs index 6cd27f4fdb8..177c43dcef9 100644 --- a/Standard/src/Preparation/UniformSuperposition.qs +++ b/Standard/src/Preparation/UniformSuperposition.qs @@ -9,12 +9,12 @@ namespace Microsoft.Quantum.Preparation { open Microsoft.Quantum.AmplitudeAmplification; open Microsoft.Quantum.Oracles; open Microsoft.Quantum.Math; - + /// # Summary - /// Creates a uniform superposition over states that encode 0 through `nIndices`. - /// + /// Creates a uniform superposition over states that encode 0 through `nIndices`. + /// /// That is, this unitary $U$ creates a uniform superposition over all number states - /// $0$ to $M-1$, given an input state $\ket{0\cdots 0}$. In other words, + /// $0$ to $M-1$, given an input state $\ket{0\cdots 0}$. In other words, /// $$ /// \begin{align} /// U\ket{0}=\frac{1}{\sqrt{M}}\sum_{j=0}^{M-1}\ket{j}. @@ -57,7 +57,7 @@ namespace Microsoft.Quantum.Preparation { let qubits = flagQubit + targetQubits; let stateOracle = StateOracle(PrepareUniformSuperposition_(nIndices, nQubits, _, _)); - (AmpAmpByOracle(1, stateOracle, 0))(qubits); + (StandardAmplitudeAmplification(1, stateOracle, 0))(qubits); ApplyToEachCA(X, flagQubit); } @@ -88,5 +88,5 @@ namespace Microsoft.Quantum.Preparation { controlled auto; adjoint controlled auto; } - + } diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj index 27ba8c776f0..78df63647d9 100644 --- a/Standard/src/Standard.csproj +++ b/Standard/src/Standard.csproj @@ -1,14 +1,14 @@ - + + netstandard2.1 Microsoft.Quantum.Standard - x64 + true + false + 1591 - 0162 - 1591 - True Microsoft Microsoft's Quantum standard libraries. © Microsoft Corporation. All rights reserved. @@ -30,7 +30,8 @@ - + + diff --git a/Standard/tests/ANDTests.qs b/Standard/tests/ANDTests.qs new file mode 100644 index 00000000000..4cefc1304f3 --- /dev/null +++ b/Standard/tests/ANDTests.qs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
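+
+// The tests below exercise ApplyAnd and ApplyLowDepthAnd (wrapped as CCNOTop)
+// on every computational-basis assignment of the control qubits, and their
+// controlled variants on registers of three to five controls.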
+namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Logical; + open Microsoft.Quantum.Measurement; + + operation AndTestHelper(polarity1 : Bool, polarity2 : Bool, gate : CCNOTop) : Unit { + using ((control1, control2, target, output) = (Qubit(), Qubit(), Qubit(), Qubit())) { + within { + ApplyPauliFromBitString(PauliX, true, [polarity1, polarity2], [control1, control2]); + gate::Apply(control1, control2, target); + } + apply { + CNOT(target, output); + } + let expected = BoolAsResult(polarity1 and polarity2); + if (MResetZ(output) != expected) { + fail $"Expected output register to be {expected}"; + } + AssertAllZero([control1, control2, target]); + } + } + + operation ControlledAndTestHelper(polarities : Bool[], gate : ((Qubit, Qubit, Qubit) => Unit is Adj+Ctl)) : Unit { + let numControls = Length(polarities); + using ((controls, target, output) = (Qubit[numControls], Qubit(), Qubit())) { + within { + ApplyPauliFromBitString(PauliX, true, polarities, controls); + Controlled gate(controls[0..numControls - 3], (controls[numControls - 2], controls[numControls - 1], target)); + } + apply { + CNOT(target, output); + } + let expected = BoolAsResult(All(EqualB(true, _), polarities)); + if (MResetZ(output) != expected) { + fail $"Expected output register to be {expected}"; + } + AssertAllZero(controls + [target]); + } + } + + @Test("QuantumSimulator") + operation ApplyAndTest() : Unit { + for (p1 in [false, true]) { + for (p2 in [false, true]) { + for (op in [ApplyAnd, ApplyLowDepthAnd]) { + AndTestHelper(p1, p2, CCNOTop(op)); + } + } + } + } + + @Test("QuantumSimulator") + operation ControlledApplyAndTest() : Unit { + for (numControls in 3..5) { + for (assignment in 0..2^numControls - 1) { + ControlledAndTestHelper(IntAsBoolArray(assignment, numControls), ApplyAnd); + } + } + for (numControls in 3..4) { + for (assignment in 0..2^numControls - 1) { + ControlledAndTestHelper(IntAsBoolArray(assignment, numControls), ApplyLowDepthAnd); + } + } + } +} + + diff --git a/Standard/tests/Characterization/DistinguishabilityTests.qs b/Standard/tests/Characterization/DistinguishabilityTests.qs new file mode 100644 index 00000000000..f9f04bb853d --- /dev/null +++ b/Standard/tests/Characterization/DistinguishabilityTests.qs @@ -0,0 +1,60 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
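+
+// The tests below compare the Hadamard- and SWAP-test based overlap estimators
+// against analytically known overlaps between single-qubit states.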
+ +namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Characterization; + open Microsoft.Quantum.Diagnostics; + + @Test("QuantumSimulator") + operation CheckOverlapBetweenPlusAndOne() : Unit { + let prep1 = ApplyToEachCA(H, _); + let prep2 = ApplyToEachCA(X, _); + + EqualityWithinToleranceFact( + EstimateRealOverlapBetweenStates( + NoOp, prep1, prep2, 1, 1000000 + ), + 1.0 / Sqrt(2.0), + 0.02 + ); + EqualityWithinToleranceFact( + EstimateImagOverlapBetweenStates( + NoOp, prep1, prep2, 1, 1000000 + ), + 0.0, + 0.02 + ); + EqualityWithinToleranceFact( + 0.5, + EstimateOverlapBetweenStates( + prep1, prep2, 1, 1000000 + ), + 0.02 + ); + } + + @Test("QuantumSimulator") + operation CheckOverlapWithCommonPreparation() : Unit { + let common = ApplyToEachCA(H, _); + let prep1 = ApplyToEachCA(S, _); + let prep2 = ApplyToEachCA(Z, _); + + EqualityWithinToleranceFact( + EstimateRealOverlapBetweenStates( + common, prep1, prep2, 1, 1000000 + ), + 0.5, + 0.02 + ); + EqualityWithinToleranceFact( + EstimateImagOverlapBetweenStates( + common, prep1, prep2, 1, 1000000 + ), + 0.5, + 0.02 + ); + } +} diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj index dc037e959cf..84d317fdcb2 100644 --- a/Standard/tests/Standard.Tests.csproj +++ b/Standard/tests/Standard.Tests.csproj @@ -1,14 +1,11 @@ - + + + + netcoreapp3.0 - x64 - false - latest Microsoft.Quantum.Standard.Tests - - - - 0162 + false @@ -19,12 +16,4 @@ - - - - - - - - diff --git a/updateQDKVersion.sh b/updateQDKVersion.sh index 9f6b10b1eb2..425337a6cb6 100644 --- a/updateQDKVersion.sh +++ b/updateQDKVersion.sh @@ -15,7 +15,7 @@ else fi : ${ver:="$NUGET_VERSION"} -: ${pkgs:="Microsoft.Quantum.Development.Kit;Microsoft.Quantum.IQSharp.Core;Microsoft.Quantum.Simulators;Microsoft.Quantum.Compiler;Microsoft.Quantum.Canon;Microsoft.Quantum.Xunit;Microsoft.Quantum.Chemistry;Microsoft.Quantum.Research"} +: ${pkgs:="Microsoft.Quantum.CsharpGeneration;Microsoft.Quantum.Runtime.Core;Microsoft.Quantum.QSharp.Core;Microsoft.Quantum.Development.Kit;Microsoft.Quantum.IQSharp.Core;Microsoft.Quantum.Simulators;Microsoft.Quantum.Compiler;Microsoft.Quantum.Xunit;Microsoft.Quantum.Chemistry;Microsoft.Quantum.Research"} for pkg in `echo $pkgs | tr ";" "\n"`; do @@ -23,8 +23,13 @@ for pkg in `echo $pkgs | tr ";" "\n"`; do grep --include=\packages.config -lri -e "package *id=\"$pkg\" *version=" * | xargs sed -i $backup "s/package *id=\"$pkg\" *version=\"\([^\"]*\)\"/package id=\"$pkg\" version=\"$ver\"/i" grep --include=\*proj -lri -e "PackageReference *Include=\"$pkg\" *Version=" * | xargs sed -i $backup "s/PackageReference *Include=\"$pkg\" *Version=\"\([^\"]*\)\"/PackageReference Include=\"$pkg\" Version=\"$ver\"/i" + grep --include=*props -lri -e "PackageReference *Include=\"$pkg\" *Version=" * | xargs sed -i $backup "s/PackageReference *Include=\"$pkg\" *Version=\"\([^\"]*\)\"/PackageReference Include=\"$pkg\" Version=\"$ver\"/i" done +# Update the version number of the Quantum Sdk: +grep --include=*proj -lri -e "Sdk=\"Microsoft.Quantum.Sdk\/" * | xargs sed -i $backup "s/Sdk=\"Microsoft.Quantum.Sdk\/\([^\"]*\)\"/Sdk=\"Microsoft.Quantum.Sdk\/$ver\"/i" +grep --include=*Template.xml -lri -e "Sdk=\"Microsoft.Quantum.Sdk\/" * | xargs sed -i $backup "s/Sdk=\"Microsoft.Quantum.Sdk\/\([^\"]*\)\"/Sdk=\"Microsoft.Quantum.Sdk\/$ver\"/i" + echo done! 
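Taken together, the pieces added in this patch make `ApplyAnd` usable as a drop-in replacement for CCNOT whenever the target qubit is known to start in state 0: the body costs four T gates, and the measurement-based adjoint costs none. A minimal usage sketch follows; the namespace and operation name are illustrative only and are not part of this patch.

```Q#
namespace Microsoft.Quantum.Samples {
    open Microsoft.Quantum.Intrinsic;
    open Microsoft.Quantum.Canon;

    /// Applies Z to `target` only when both controls are 1, computing the
    /// logical AND of the controls into a temporary qubit with ApplyAnd and
    /// uncomputing it with the cheap, measurement-based Adjoint ApplyAnd.
    operation ApplyZIfBothControlsSet(control1 : Qubit, control2 : Qubit, target : Qubit) : Unit {
        using (aux = Qubit()) {
            within {
                // aux starts in |0⟩, as ApplyAnd requires.
                ApplyAnd(control1, control2, aux);
            } apply {
                CZ(aux, target);
            }
            // The implicit Adjoint ApplyAnd resets aux to |0⟩ before release.
        }
    }
}
```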
echo From 3e1e4619e6cc5ae829feef1d657262b5878e7f63 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Mon, 13 Jan 2020 11:39:44 -0800 Subject: [PATCH 12/43] Simplify gradient estimation and classifier structure generation (#200) * Clarify the restriction on the number of bits for IntAsBoolArray (#171) * Clarify the restriction on the number of bits for IntAsBoolArray This should fix #166 by providing a more specific error message. * Update Standard/src/Convert/Convert.qs Co-Authored-By: Chris Granade * Allow to have bits = 0 Looks like our tests assume that number = 0 with bits = 0 is a valid scenario; updating the change to account for that * Package updates (#188) * Quantum AND gates (a.k.a. CCNOT with constant target) (#186) * Two AND gate implementations. * Added test case. * Formatting. * Code formatting. * Update Standard/src/Canon/And.qs Co-Authored-By: Chris Granade * Assertion for 0-target. * Added DOI to references. * Named application for CCNOTop. * Rename operations. * Add Test attribute. * Add links to arXiv. * Rename operations. * Better assertion for 0-target. * Fix bug in LowDepthAnd. * Docs. * Doc string convention. * Controlled variant for `ApplyAnd`. * Controlled AndLowDepth. * Adjoint Controlled LowDepthAnd. * References. * Simplify code. * Apply suggestions from code review Co-Authored-By: Chris Granade * Integrate comment. * Removed comment ref to IncrementByIntegerPhaseLE (#189) There appears to be no function IncrementByIntegerPhaseLE, and I guess it is covered by ApplyLEOperationOnPhaseLE. Co-authored-by: Chris Granade * New Hadamard and SWAP test operations. (#196) * First work on Hadamard and SWAP test operations. * (c) header and typo fix. * Fixed typo with placement of phase shift. * Put public operations above private. * Added tests for new operations. * Added API documentation comments. * Newline at end of file. * Refactor AA namespace to use Q# style guide (#197) * Began simplifying AA interface. * Expose traditional AA as new public operation. * Removed rest of "AmpAmp" prefix. * Resolve deprecation warning. * Begin moving classifier structure creation to Q#. * Moving example datasets into new namespace. * Fix datasets used in layered structure demo. * Fixed bug with cyclic entangling layer. * Use new Hadamard test operation to simplify grad est. * Simplify input encoder logic. * Removed layer construction methods moved out to Q#. * Revised name of approximate input encoder. * Removed unused interop library. * Update pack script for new layout. * Addressing feedback. 
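For the classifier structure generation described in the bullets above, the Q# side of this change represents a circuit structure as a `GateSequence` of `ControlledRotation` values, each built from a `GateSpan`, a Pauli axis, and an index into the parameter vector (the same constructors used by the `unFlatten*` helpers in Convert.qs below). A hedged sketch of building a small structure by hand under that representation; the function name and the particular layer layout are illustrative, not part of this change.

```Q#
namespace Microsoft.Quantum.Samples {
    open Microsoft.Quantum.MachineLearning;

    /// Returns a two-qubit classifier structure: one local X rotation on each
    /// qubit, followed by a Z rotation on qubit 1 controlled on qubit 0.
    function TwoQubitStructure() : GateSequence {
        return GateSequence([
            ControlledRotation(GateSpan(0, new Int[0]), PauliX, 0),
            ControlledRotation(GateSpan(1, new Int[0]), PauliX, 1),
            ControlledRotation(GateSpan(1, [0]), PauliZ, 2)
        ]);
    }
}
```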
Co-authored-by: Mariia Mykhailova Co-authored-by: bettinaheim <34236215+bettinaheim@users.noreply.github.com> Co-authored-by: Mathias Soeken Co-authored-by: numpde <21158052+numpde@users.noreply.github.com> --- Build/pack.ps1 | 4 +- MachineLearning.sln | 76 ++---- .../src/{Runtime => }/Classification.qs | 4 +- MachineLearning/src/Convert.qs | 66 +++++ MachineLearning/src/DataModel/Interop.cs | 247 ------------------ .../Examples.qs => Datasets/IrisDataset.qs} | 13 +- .../src/Datasets/Properties/NamespaceInfo.qs | 6 + MachineLearning/src/Datasets/WineDataset.qs | 190 ++++++++++++++ MachineLearning/src/{Runtime => }/Features.qs | 0 .../src/{Runtime => }/GradientEstimation.qs | 56 ++-- .../src/{Runtime => }/InputEncoding.qs | 44 ++-- ...ataModel.csproj => MachineLearning.csproj} | 38 ++- .../{Runtime => }/Properties/NamespaceInfo.qs | 0 .../src/{Runtime => }/RotationSequences.qs | 24 +- MachineLearning/src/Runtime/Convert.qs | 63 ----- MachineLearning/src/Runtime/Runtime.csproj | 18 -- MachineLearning/src/Structure.qs | 75 ++++++ MachineLearning/src/{Runtime => }/Training.qs | 2 +- MachineLearning/src/{Runtime => }/Types.qs | 0 MachineLearning/src/{Runtime => }/Utils.qs | 4 +- .../src/{Runtime => }/Validation.qs | 0 .../tests/MachineLearningTests.csproj | 3 +- 22 files changed, 449 insertions(+), 484 deletions(-) rename MachineLearning/src/{Runtime => }/Classification.qs (96%) create mode 100644 MachineLearning/src/Convert.qs delete mode 100644 MachineLearning/src/DataModel/Interop.cs rename MachineLearning/src/{Runtime/Examples.qs => Datasets/IrisDataset.qs} (96%) create mode 100644 MachineLearning/src/Datasets/Properties/NamespaceInfo.qs create mode 100644 MachineLearning/src/Datasets/WineDataset.qs rename MachineLearning/src/{Runtime => }/Features.qs (100%) rename MachineLearning/src/{Runtime => }/GradientEstimation.qs (65%) rename MachineLearning/src/{Runtime => }/InputEncoding.qs (76%) rename MachineLearning/src/{DataModel/DataModel.csproj => MachineLearning.csproj} (58%) rename MachineLearning/src/{Runtime => }/Properties/NamespaceInfo.qs (100%) rename MachineLearning/src/{Runtime => }/RotationSequences.qs (68%) delete mode 100644 MachineLearning/src/Runtime/Convert.qs delete mode 100644 MachineLearning/src/Runtime/Runtime.csproj create mode 100644 MachineLearning/src/Structure.qs rename MachineLearning/src/{Runtime => }/Training.qs (99%) rename MachineLearning/src/{Runtime => }/Types.qs (100%) rename MachineLearning/src/{Runtime => }/Utils.qs (87%) rename MachineLearning/src/{Runtime => }/Validation.qs (100%) diff --git a/Build/pack.ps1 b/Build/pack.ps1 index a69ed719b75..d6892b6f3f7 100644 --- a/Build/pack.ps1 +++ b/Build/pack.ps1 @@ -14,7 +14,7 @@ function Pack-One() { -c $Env:BUILD_CONFIGURATION ` -v $Env:BUILD_VERBOSITY ` -o $Env:NUGET_OUTDIR ` - /property:PackageVersion=$Env:NUGET_VERSION + /property:PackageVersion=$Env:NUGET_VERSION if ($LastExitCode -ne 0) { Write-Host "##vso[task.logissue type=error;]Failed to pack $project." 
@@ -29,7 +29,7 @@ Write-Host "##[info]Pack Chemistry library" Pack-One '../Chemistry/src/DataModel/DataModel.csproj' Write-Host "##[info]Pack QML library" -Pack-One '../MachineLearning/src/DataModel/DataModel.csproj' +Pack-One '../MachineLearning/src/MachineLearning.csproj' Write-Host "##[info]Pack Numerics library" Pack-One '../Numerics/src/Numerics.csproj' diff --git a/MachineLearning.sln b/MachineLearning.sln index 2e787d25cc1..a083c3f402f 100644 --- a/MachineLearning.sln +++ b/MachineLearning.sln @@ -3,15 +3,11 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 15 VisualStudioVersion = 15.0.26124.0 MinimumVisualStudioVersion = 15.0.26124.0 -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "MachineLearning", "MachineLearning", "{A16B06ED-70E8-4494-B040-D446F0F74588}" +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "MachineLearning", "MachineLearning", "{D067C787-94C3-4DB8-9012-1F22AE784BEF}" EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{E540364C-047F-446E-B8C1-CD41224E2282}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "MachineLearningTests", "MachineLearning\tests\MachineLearningTests.csproj", "{94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Runtime", "MachineLearning\src\Runtime\Runtime.csproj", "{4C399D64-0435-47E0-99D3-AA898E640717}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DataModel", "MachineLearning\src\DataModel\DataModel.csproj", "{E4A725A7-3525-4FC5-9794-4317B5DB9C9B}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "MachineLearningTests", "MachineLearning\tests\MachineLearningTests.csproj", "{0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "MachineLearning", "MachineLearning\src\MachineLearning.csproj", "{B045BF35-6BE6-4982-9618-8725C70D3F91}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -26,47 +22,33 @@ Global HideSolutionNode = FALSE EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|Any CPU.Build.0 = Debug|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x64.ActiveCfg = Debug|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x64.Build.0 = Debug|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x86.ActiveCfg = Debug|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Debug|x86.Build.0 = Debug|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Release|Any CPU.ActiveCfg = Release|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Release|Any CPU.Build.0 = Release|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x64.ActiveCfg = Release|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x64.Build.0 = Release|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x86.ActiveCfg = Release|Any CPU - {4C399D64-0435-47E0-99D3-AA898E640717}.Release|x86.Build.0 = Release|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x64.ActiveCfg = Debug|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x64.Build.0 = Debug|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x86.ActiveCfg = Debug|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Debug|x86.Build.0 = Debug|Any CPU - 
{E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|Any CPU.Build.0 = Release|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x64.ActiveCfg = Release|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x64.Build.0 = Release|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x86.ActiveCfg = Release|Any CPU - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B}.Release|x86.Build.0 = Release|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x64.ActiveCfg = Debug|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x64.Build.0 = Debug|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x86.ActiveCfg = Debug|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Debug|x86.Build.0 = Debug|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|Any CPU.Build.0 = Release|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x64.ActiveCfg = Release|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x64.Build.0 = Release|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x86.ActiveCfg = Release|Any CPU - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B}.Release|x86.Build.0 = Release|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Debug|x64.ActiveCfg = Debug|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Debug|x64.Build.0 = Debug|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Debug|x86.ActiveCfg = Debug|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Debug|x86.Build.0 = Debug|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Release|Any CPU.ActiveCfg = Release|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Release|Any CPU.Build.0 = Release|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Release|x64.ActiveCfg = Release|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Release|x64.Build.0 = Release|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Release|x86.ActiveCfg = Release|Any CPU + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C}.Release|x86.Build.0 = Release|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Debug|Any CPU.Build.0 = Debug|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Debug|x64.ActiveCfg = Debug|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Debug|x64.Build.0 = Debug|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Debug|x86.ActiveCfg = Debug|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Debug|x86.Build.0 = Debug|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Release|Any CPU.ActiveCfg = Release|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Release|Any CPU.Build.0 = Release|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Release|x64.ActiveCfg = Release|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Release|x64.Build.0 = Release|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Release|x86.ActiveCfg = Release|Any CPU + {B045BF35-6BE6-4982-9618-8725C70D3F91}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution - {E540364C-047F-446E-B8C1-CD41224E2282} = {A16B06ED-70E8-4494-B040-D446F0F74588} - {4C399D64-0435-47E0-99D3-AA898E640717} = 
{E540364C-047F-446E-B8C1-CD41224E2282} - {E4A725A7-3525-4FC5-9794-4317B5DB9C9B} = {E540364C-047F-446E-B8C1-CD41224E2282} - {0F1A243B-5263-4F97-ACAE-A7A6BAB8163B} = {A16B06ED-70E8-4494-B040-D446F0F74588} + {94EBDF5F-0A9D-4CE5-9D16-3FF323B8792C} = {D067C787-94C3-4DB8-9012-1F22AE784BEF} + {B045BF35-6BE6-4982-9618-8725C70D3F91} = {D067C787-94C3-4DB8-9012-1F22AE784BEF} EndGlobalSection EndGlobal diff --git a/MachineLearning/src/Runtime/Classification.qs b/MachineLearning/src/Classification.qs similarity index 96% rename from MachineLearning/src/Runtime/Classification.qs rename to MachineLearning/src/Classification.qs index aa3d9fae7e2..771270819ca 100644 --- a/MachineLearning/src/Runtime/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -7,7 +7,7 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Convert; - + operation _PrepareClassification( encoder : (LittleEndian => Unit is Adj + Ctl), parameters : Double[], @@ -28,7 +28,7 @@ namespace Microsoft.Quantum.MachineLearning { ) : Double { let nQubits = FeatureRegisterSize(sample); - let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); let encodedSample = StateGenerator(nQubits, circEnc); return 1.0 - EstimateFrequencyA( _PrepareClassification(encodedSample::Apply, parameters, gates, _), diff --git a/MachineLearning/src/Convert.qs b/MachineLearning/src/Convert.qs new file mode 100644 index 00000000000..9149c146e3b --- /dev/null +++ b/MachineLearning/src/Convert.qs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + + function unFlattenSchedule(sc : Int[][]) : SamplingSchedule + { + mutable ret = new Range[0]; + for (flattenedRange in sc) { + set ret += [flattenedRange[0]..flattenedRange[1]..flattenedRange[2]]; + } + return SamplingSchedule(ret); + } + + function unFlattenLabeledSamples(dat:Double[][], labs:Int[]) : LabeledSample[] { + mutable cnt = MinI(Length(dat), Length(labs)); + mutable ret = new LabeledSample[cnt]; + for (j in 0..(cnt - 1)) { + set ret w/= j <- LabeledSample(dat[j], labs[j]); + } + return ret; + } + + /// Debugging prop + operation unFlattenPauli(p:Int): Pauli + { + if (p==1) + { + return PauliX; + } + if (p==2) + { + return PauliY; + } + if (p==3) + { + return PauliZ; + } + return PauliI; + } + + /// Debugging prop + /// upcasting controlled rotation in flat representation (paramIx,pauliIx,gateSpan) + operation unFlattenControlledRotation(cod:Int[]): ControlledRotation { + return ControlledRotation( + GateSpan( + cod[2], cod[3...] + ), + unFlattenPauli(cod[1]), + cod[0] + ); + } + + /// Debugging prop + operation unFlattenGateSequence(seq: Int[][]) : GateSequence { + mutable tmp = new ControlledRotation[Length(seq)]; + for (icr in 0..(Length(seq) - 1)) { + set tmp w/= icr <- unFlattenControlledRotation(seq[icr]); + } + return GateSequence(tmp); + } + +} \ No newline at end of file diff --git a/MachineLearning/src/DataModel/Interop.cs b/MachineLearning/src/DataModel/Interop.cs deleted file mode 100644 index a59d812d233..00000000000 --- a/MachineLearning/src/DataModel/Interop.cs +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -using Microsoft.Quantum.Simulation.Core; -using Microsoft.Quantum.Simulation.Simulators; -using System; -using System.IO; -using System.Linq; -using System.Runtime.InteropServices; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; - - - -/// -/// This code space provides suggested interoperability classes for running the -/// Q# Qccc (quantum circuit-centric classifier) code on Microsoft quantum simulator -/// -namespace Microsoft.Quantum.MachineLearning.Interop -{ - using Microsoft.Quantum.Logical; - using Microsoft.Quantum.MachineLearning; - using System.Runtime.CompilerServices; - using System.Runtime.ExceptionServices; - using System.Runtime.InteropServices.ComTypes; - using System.Xml; - - - /// - /// Quick conversions into IQArray format - /// - public class Qonvert - { - - public static long ToC(char pauli) - { - if (pauli.Equals('I')) - { - return 0L; - } - if (pauli.Equals('X')) - { - return 1L; - } - if (pauli.Equals('Y')) - { - return 2L; - } - if (pauli.Equals('Z')) - { - return 3L; - } - return -1L; - } - - public static IQArray> ToQ(List src) - { - List> tmp = new List>(src.Count); - for (int ix = 0; ix < src.Count; ix++) - { - tmp.Add(new QArray(src[ix])); - } - return new QArray>(tmp.ToArray()); - } - - public static IQArray ToQ(List src) - { - return new QArray(src.ToArray()); - } - - public static IQArray> ToQ(List src) - { - List> tmp = new List>(src.Count); - for (int ix = 0; ix < src.Count; ix++) - { - tmp.Add(new QArray(src[ix])); - } - return new QArray>(tmp.ToArray()); - } - } //Qonvert - - public class ClassificationModel - { - long _nQubits; - IQArray> _structure; - IQArray _cachedParameters; - double _bias; - - public ClassificationModel(long nQubits) - { - _nQubits = nQubits; - _structure = null; - _cachedParameters = null; - _bias = -2.0; - } - - public ClassificationModel(long nQubits, List structure) - { - _nQubits = nQubits; - _structure = Qonvert.ToQ(structure); - _cachedParameters = null; - _bias = -2.0; - } - - public ClassificationModel(long nQubits, List structure,double[] parameters) - { - _nQubits = nQubits; - _structure = Qonvert.ToQ(structure); - _cachedParameters = new QArray(parameters); - _bias = -2.0; - } - - public ClassificationModel(long nQubits, List structure, double[] parameters, double bias) - { - _nQubits = nQubits; - _structure = Qonvert.ToQ(structure); - _cachedParameters = new QArray(parameters); - _bias = bias; - } - - public bool isTrained - { - get { return (_bias > -1.5) && (_structure != null) && (_cachedParameters != null); } - } - - public IQArray> CircuitStructure - { - get { return _structure; } - set { _structure = value; } - } - - public IQArray CachedParameters - { - get { return _cachedParameters; } - } - - public double Bias - { - get { return _bias; } - } - - /// - /// Creates a layer of nQubits Pauli rotations - /// - /// Number of qubits to rotate - /// Type of Pauli gate - /// Sequence of nQubits rotation templates - public static List LocalRotationsLayer(long nQubits, char pauli) - { - List ret = new List((int)nQubits); - for (long iq = 0; iq < nQubits; iq++) - { - long[] localRp = new long[] { -1, Qonvert.ToC(pauli), iq }; - ret.Add(localRp); - } - return ret; - } - - /// - /// Creates a layer of nQubits Pauli rotations - /// - /// Number of qubits to rotate - /// Type of Pauli gate - /// Sequence of nQubits rotation templates - public static List PartialLocalLayer(long[] indices, char pauli) - { - List ret = new List(indices.Length); - foreach (long iq in indices) - { - 
long[] localRp = new long[] { -1, Qonvert.ToC(pauli), iq }; - ret.Add(localRp); - } - return ret; - } - - /// - /// Creates a cyclic block of nQubits controlled rotations that starts - /// with control qubit (nQubits-1), target qubit (cspan-1) % nQubits , followed by a - /// ladder of entanglers with control qubits iq and target qubit (iq+cspan) % nQubits - /// - /// Number of qubits to entangle - /// - /// index offset between control and target qubits - /// - public static List CyclicEntanglerLayer(long nQubits, char pauli, long cspan) - { - List ret = new List((int)nQubits); - ret.Add(new long[] { -1, Qonvert.ToC(pauli), (long) ((cspan-1) % nQubits), nQubits - 1 }); - for (long iq = 1; iq < nQubits; iq++) - { - long[] entRp = new long[] { -1, Qonvert.ToC(pauli), (long)(((iq + 1) * cspan - 1) % nQubits), (long)((iq*cspan - 1) % nQubits) }; - ret.Add(entRp); - } - return ret; - } - - public static List CyclicEntanglerLayer(long nQubits, char pauli) - { - return CyclicEntanglerLayer(nQubits, pauli, 1L); - } - - public static List JoinLayers(List> layers) - { - List structure = new List(layers.Count * layers[0].Count); - for (int ila = 0; ila < layers.Count; ila++) - { - structure.AddRange(layers[ila]); - } - return structure; - } - - public static void reindex(List struc) - { - for (int ix=0; ix < struc.Count; ix++) - { - long[] gt = struc[ix]; - gt[0] = ix; - struc[ix] = gt; - } - } - - public long CountMisclassifications(double tolerance, IQArray> samples, IQArray knownLabels, IQArray> validationSchedule, long nMeasurements, uint randomizationSeed) - { - if (this.isTrained) - { - var sim = new QuantumSimulator(false, randomizationSeed); - return CountValidationMisses.Run(sim, tolerance, this._nQubits, samples, knownLabels, validationSchedule, this._structure, this.CachedParameters, this.Bias, nMeasurements).Result; - } - return long.MaxValue; - } - - public long CountMisclassifications(double tolerance, List samples, List knownLabels, List validationSchedule, long nMeasurements, uint randomizationSeed) - { - return CountMisclassifications(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(knownLabels), Qonvert.ToQ(validationSchedule), nMeasurements, randomizationSeed); - } - - public long CountMisclassifications(double tolerance, List samples, List knownLabels, long nMeasurements, uint randomizationSeed) - { - var validationSchedule = new List(1); - validationSchedule.Add(new long[] { 0L, 1L, ((long)(samples.Count - 1)) }); - return CountMisclassifications(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(knownLabels), Qonvert.ToQ(validationSchedule), nMeasurements, randomizationSeed); - } - - } //class ClassificationModel - -} diff --git a/MachineLearning/src/Runtime/Examples.qs b/MachineLearning/src/Datasets/IrisDataset.qs similarity index 96% rename from MachineLearning/src/Runtime/Examples.qs rename to MachineLearning/src/Datasets/IrisDataset.qs index b7c3ccd4129..0f7cc0101db 100644 --- a/MachineLearning/src/Runtime/Examples.qs +++ b/MachineLearning/src/Datasets/IrisDataset.qs @@ -1,7 +1,8 @@ -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Primitive; - open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Math; +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
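+
+// Example data for the classifier: four-feature samples derived from the
+// classic Iris flower dataset, packaged as LabeledSample values for use with
+// the training and validation operations in Microsoft.Quantum.MachineLearning.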
+ +namespace Microsoft.Quantum.MachineLearning.Datasets { + open Microsoft.Quantum.MachineLearning; operation IrisTrainingData() : LabeledSample[] { return [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), @@ -87,8 +88,4 @@ namespace Microsoft.Quantum.MachineLearning { ]; } - operation Examples () : Unit - { - - } } diff --git a/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs b/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs new file mode 100644 index 00000000000..b1559562869 --- /dev/null +++ b/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/// # Summary +/// TODO +namespace Microsoft.Quantum.MachineLearning.Datasets { } diff --git a/MachineLearning/src/Datasets/WineDataset.qs b/MachineLearning/src/Datasets/WineDataset.qs new file mode 100644 index 00000000000..3c944183a8f --- /dev/null +++ b/MachineLearning/src/Datasets/WineDataset.qs @@ -0,0 +1,190 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning.Datasets { + open Microsoft.Quantum.MachineLearning; + + operation WineData() : LabeledSample[] { + return [ + LabeledSample([0.363205411076437,0.0638668636484459,0.178532471398649,-0.0288976056473681,0.433507480753481,0.26765452507215,0.295933366591769,0.0407618624310926,0.335961380122613,0.183046369886591,0.197761713895692,0.42801676498221,0.286786326879289,0.102563320682669,0.102563320682669,0.102563320682669], 1), + LabeledSample([0.249938938530866,0.103806961963521,0.0288119563511447,-0.316327610625978,0.213915431364644,0.331181226646993,0.358247971682033,0.0243452672750831,0.102474003311389,0.142145109786172,0.289810795079708,0.452985093276961,0.397496120059943,0.145592040692672,0.145592040692672,0.145592040692672], 1), + LabeledSample([0.18017551770934,0.159954186450334,0.330335005051653,0.11029657222382,0.172026075662364,0.285685353685049,0.3437678958266,0.0687101235918643,0.503546189307832,0.198186899162854,0.204255658590153,0.289189504133849,0.363892624800524,0.109472606661304,0.109472606661304,0.109472606661304], 1), + LabeledSample([0.3478185280261,0.0855741081675129,0.193962405871525,0.0214750733479136,0.256731607736543,0.454743707959586,0.32073047384103,-0.00578276910401499,0.274962030580035,0.291023282748096,0.0739296574471514,0.294961529988612,0.424228072429038,0.091790139403591,0.091790139403591,0.091790139403591], 1), + LabeledSample([0.362297632590457,0.0713765952097998,0.1918508326995,-0.0471194731007316,0.279898551657966,0.380054980216169,0.348218985017828,0.113013918394729,0.255594598610856,0.259339012361842,0.206751611476739,0.206506348822603,0.466330974472638,0.103865658362098,0.103865658362098,0.103865658362098], 1), + LabeledSample([0.456363512237805,0.0989267315498482,0.220396293818176,-0.0848977157377709,0.126570706690875,0.231455369932631,0.253057526681058,0.0748906926624935,0.296662776679902,0.183096193491806,0.215186020544714,0.415101672494017,0.45173680541366,0.119319816525617,0.119319816525617,0.119319816525617], 1), + LabeledSample([0.365824230547686,0.134384795182974,0.304649953841435,0.0652321211089234,0.408030734314465,0.244219941368521,0.237852026774996,0.0838599153918433,0.0708419429859215,0.158763660222825,0.231785452457851,0.392781508852428,0.429930004244632,0.112903947819199,0.112903947819199,0.112903947819199], 1), + 
LabeledSample([0.566183478055546,0.0654724741578849,0.0439622864967305,-0.119206110391732,0.143004569028705,0.320916045012887,0.340893691950237,0.0630283295835976,0.305745022199024,0.184756786750361,0.267797291682324,0.244495204044942,0.333036138702624,0.122972758364531,0.122972758364531,0.122972758364531], 1), + LabeledSample([0.337713046108315,0.122718945059575,0.10700561052042,0.0764645126338355,0.201946721703765,0.300339986693404,0.331847112159928,-0.0299072363915305,0.357482732812817,0.189234861665545,0.330277114346835,0.269442761691643,0.484863724448287,0.10199748278123,0.10199748278123,0.10199748278123], 1), + LabeledSample([0.466701129863441,0.0454693396504955,0.161149317242173,0.0326284696596175,0.133694628665751,0.177107619792893,0.278006483001738,0.0233203461257999,0.201144615896292,0.191636316683084,0.381999714367244,0.268736937921384,0.521861643126541,0.139462703109841,0.139462703109841,0.139462703109841], 1), + LabeledSample([0.352356775250346,0.0849147066288441,0.213994619198652,-0.0143836459418002,0.0452884898165237,0.283231761082382,0.322192973337249,0.0671114837383224,0.268871309289799,0.230329073599442,0.342318391134332,0.273703498451036,0.513006853430941,0.130939282823474,0.130939282823474,0.130939282823474], 1), + LabeledSample([0.43089413940549,0.0626199622575399,0.147535243701546,-0.201497441491844,0.0531208270245816,0.316671679326467,0.364741512975659,0.205099273306104,0.444154125496589,0.157464852880314,0.312671915003017,0.168320603210868,0.306105868416982,0.096560575593511,0.096560575593511,0.096560575593511], 1), + LabeledSample([0.272726407517705,0.0827168324333284,0.3496106940634,0.0446612423026689,0.296538163194931,0.299452238244459,0.294137889421737,0.0690664590267155,0.12788722340928,0.313586218717624,0.376912296280345,0.225524110093736,0.426286381219746,0.110040339141755,0.110040339141755,0.110040339141755], 1), + LabeledSample([0.383572208467366,0.0935136576220856,0.343620408177357,0.168402125357628,0.367514242898029,0.27310051209359,0.313807941452454,0.101821494321014,0.257525175254316,0.224370057363687,0.22136861749585,0.165326053699488,0.391595143068333,0.104650184385657,0.104650184385657,0.104650184385657], 1), + LabeledSample([0.316013513913644,0.0493226285823722,0.306955303083295,0.179519413042103,0.334809905839043,0.328494083962476,0.375590411592675,0.198432370679759,0.203509333335944,0.267811707061401,0.277734218718739,0.158015615196535,0.343836981820164,0.111558803879491,0.111558803879491,0.111558803879491], 1), + LabeledSample([0.31870497106277,0.0429712614071499,0.184285151711081,0.00964542259308528,0.209961702784,0.342183874334946,0.378094966508491,0.0787814637408952,0.20026575558015,0.344191686729784,0.285912288149745,0.176960266353846,0.505227332666082,0.0918346293577422,0.0918346293577422,0.0918346293577422], 1), + LabeledSample([0.356307758352805,0.0571295107798862,0.103665767801926,-0.012079833971045,0.453569690442849,0.336083659933176,0.334421582101246,-0.00692790241244995,0.307010347549411,0.196965160324229,0.246333773806227,0.41175624147722,0.169625166567189,0.109966889006815,0.109966889006815,0.109966889006815], 1), + LabeledSample([0.161217748043416,0.432012087856065,0.378968684463582,0.131159855324296,0.217860878028004,0.226361121435969,0.255816095501333,0.00678340496692604,0.323664339685322,0.137120494937026,0.242891887659839,0.436933255666067,0.195077722155152,0.130180031547429,0.130180031547429,0.130180031547429], 1), + 
LabeledSample([0.33387321962864,0.104003734911291,0.174357166514354,0.0188598420614302,0.200238000417398,0.278477279672617,0.335197145956839,0.0359753128735102,0.222720512525599,0.076991773708088,0.301339874113075,0.552589883968051,0.339490864618573,0.127425890371198,0.127425890371198,0.127425890371198], 1), + LabeledSample([0.164804886510591,0.0722538432783593,0.333530926538808,0.0997386246383389,0.144057309090506,0.28478622392534,0.286789335102904,0.0251279078559012,0.174644396421027,0.103329402020434,0.336621180516676,0.538126003301765,0.387138650464392,0.150272467405704,0.150272467405704,0.150272467405704], 1), + LabeledSample([0.138045034940391,0.10096733243651,0.565457577461934,0.357529010586571,0.372975197652389,0.212164465546768,0.223324251871177,0.245882968670251,0.222005459488057,0.0440548078915432,0.236854156367615,0.257151478091355,0.167676962473548,0.0951383178498998,0.0951383178498998,0.0951383178498998], 1), + LabeledSample([0.27706228465463,0.0944690423993351,0.371279295793269,-0.0090237059702085,0.101794091444167,0.367202038390134,0.366416680746154,0.14682141141967,0.153385221675984,0.16810232470317,0.159181546018473,0.370233800319128,0.454471601718924,0.134936499639745,0.134936499639745,0.134936499639745], 1), + LabeledSample([0.317210982002182,0.106460924648269,0.033146499020418,0.0535834830689089,0.143461867556865,0.287506074549938,0.276972889518789,0.0472878373297268,0.147745460243886,0.117321269850464,0.302067517823741,0.305652499113775,0.63044189160374,0.167495270900221,0.167495270900221,0.167495270900221], 1), + LabeledSample([0.323782804937267,0.0967686336582948,0.413611657738741,0.150690255678227,0.243561977694651,0.328356719570779,0.307544863864273,0.15984164703184,0.214782154388153,0.117457351693143,0.361086484054943,0.346951270439723,0.238229898839817,0.111512154006688,0.111512154006688,0.111512154006688], 1), + LabeledSample([0.279349555106746,0.0370104144183211,0.334213378417837,0.282297751914523,0.165302668712666,0.321496686499494,0.331821540835722,0.0539160322994988,0.368686009342752,0.191790839601492,0.301258189226166,0.179073860160303,0.395943808377124,0.105194017606742,0.105194017606742,0.105194017606742], 1), + LabeledSample([0.27005015291199,0.0630772889648215,0.154616485583782,0.138130446588243,0.235268666883373,0.310026088106195,0.346841534612754,-0.0331329802316931,0.27231483724987,0.293017836931826,0.253125380319702,0.23158729460557,0.539645635992194,0.112998758441953,0.112998758441953,0.112998758441953], 1), + LabeledSample([0.346713411485213,0.105012869661639,0.184811072491187,0.0548181961587539,0.253625600781446,0.237872750892395,0.318961201449689,0.271339376712739,0.332372860697775,0.0850741828543554,0.420506007983091,0.274055170038427,0.333109503610011,0.135065944999207,0.135065944999207,0.135065944999207], 1), + LabeledSample([0.271022354903467,0.0390745208209509,0.317889164962927,0.139509100089267,0.474001987855805,0.294622995319019,0.243367356561056,0.29314414864518,0.0882580916700038,0.163164893944254,0.323990267781163,0.229579697508733,0.354597817947863,0.100055953996837,0.100055953996837,0.100055953996837], 1), + LabeledSample([0.296171884335753,0.100614775138648,0.218752326936746,0.244152694965527,0.196663727926573,0.319415898270834,0.37104777890512,0.0223818870342515,0.291890527888441,0.19251205199505,0.258089258396546,0.435586076048342,0.288896417266678,0.133850434708707,0.133850434708707,0.133850434708707], 1), + 
LabeledSample([0.252373671257443,0.0723018808380158,0.532596533272876,-0.044098308106594,0.338217719219032,0.293745858093293,0.3187720066088,0.147760962531924,0.123245390734908,0.151752742109324,0.304201799039272,0.250586710453292,0.269209619708056,0.135799995890695,0.135799995890695,0.135799995890695], 1), + LabeledSample([0.231647849319649,0.0870580778073824,0.379819510930802,0.119683262892996,0.201958323330672,0.291859534446306,0.318243685177281,0.0818257194381086,0.177409914789709,0.142552607383287,0.38749726469097,0.206569993185964,0.474496045065283,0.159647804264275,0.159647804264275,0.159647804264275], 1), + LabeledSample([0.255247226696605,0.06041599445067,-0.00255968322164522,-0.0557623660678594,0.217229013118827,0.294756572623486,0.393362717775018,0.0682465918139527,0.160216510643726,0.0927370291446394,0.481064257912117,0.285307935542058,0.446167701011294,0.171719265613694,0.171719265613694,0.171719265613694], 1), + LabeledSample([0.275026515547499,0.0727400792013697,0.128762793923213,-0.00279155677458797,0.374437692908058,0.396130353966779,0.375086796481439,0.127101363132371,0.397509763863738,0.245200476434608,0.159660362687242,0.358672446067021,0.187892640693357,0.116812751455572,0.116812751455572,0.116812751455572], 1), + LabeledSample([0.311461752837503,0.502549497625733,0.0136433770988428,0.163056233779351,0.0668046663611401,0.272607631039843,0.350031792999902,0.0420992128778312,0.1808955506979,0.136019537594797,0.166608583769084,0.34215084378215,0.397280719417458,0.149116970955777,0.149116970955777,0.149116970955777], 1), + LabeledSample([0.335978144067938,0.0983210378580677,0.298097188286758,-0.0620608660613614,0.180728542341856,0.415699424320707,0.413265127196523,-0.0999159645032936,0.203949116092578,0.189765659712172,0.106979744949575,0.395413251109613,0.336768787203074,0.115010614246019,0.115010614246019,0.115010614246019], 1), + LabeledSample([0.267254400330717,0.530864539992977,0.148894591250536,0.0799552165600163,0.265475033620942,0.336749597682553,0.340364890972572,0.128256883977383,0.249894873995414,0.144049494069581,0.0831136106174955,0.343047025915129,0.164850715568234,0.14950754712276,0.14950754712276,0.14950754712276], 1), + LabeledSample([0.409535358236058,0.423821376417793,0.209554705754144,0.132754750116135,0.302864073191331,0.3178875886134,0.26924524714014,0.0733184371664153,0.0732958296204928,0.178502810621827,0.101371504171095,0.346751724293268,0.334343976936978,0.116814815828549,0.116814815828549,0.116814815828549], 1), + LabeledSample([0.411838215785579,0.329697985039249,0.101980670189617,-0.0118834750397091,0.181041987677738,0.391008260201782,0.328985525228739,0.0305415683419268,0.326811408573422,0.141709171615972,0.208590524286652,0.345418151382434,0.302489845499348,0.108179366027653,0.108179366027653,0.108179366027653], 1), + LabeledSample([0.365420884131811,0.0721780607230944,0.0113078420011285,-0.0135763788382327,0.194210964330357,0.405316516129519,0.414348135414213,-0.0504648498369757,0.357633344975218,0.257048685083646,0.13808777164507,0.366864443994308,0.3020895160535,0.123590452351992,0.123590452351992,0.123590452351992], 1), + LabeledSample([0.4006706465907,0.123743705191569,0.191332850350624,0.13232426073567,0.214876890555312,0.302289422501197,0.325178974246259,0.103811797461618,0.424125864699886,0.259450220277008,0.255979506564098,0.215885956908381,0.335711269129357,0.121012196145575,0.121012196145575,0.121012196145575], 1), + 
LabeledSample([0.165010086109523,0.0737493375917614,-0.0379955147007807,-0.188436093151012,0.0741761269601386,0.276461086643119,0.361942753242732,-0.0987966298943638,0.552050688643421,0.316782308151838,0.276026551516262,0.240036696328028,0.360509719502187,0.113722177892932,0.113722177892932,0.113722177892932], 1), + LabeledSample([0.322538934941513,0.0620906870973839,0.30117864344653,0.0462124741572112,0.0975246212047387,0.208157123222826,0.317250883177015,-0.0333862138801274,0.372973198362888,0.200289943745998,0.361594501059424,0.347288303861623,0.41855376578972,0.113862402088516,0.113862402088516,0.113862402088516], 1), + LabeledSample([0.285886366617401,0.0684810811583678,0.171380327231864,-0.0984477110534525,0.263309088994922,0.509940512787294,0.390809494758036,0.0871231538320202,0.224056703303123,0.27312555178283,0.176819633724361,0.286947787961415,0.33981923310787,0.101558439774049,0.101558439774049,0.101558439774049], 1), + LabeledSample([0.297296105273249,0.0946255708393242,0.334838488001863,0.0395700727110477,0.327258215505474,0.333258754902954,0.272944821496624,0.181405049197193,0.187812762259515,0.240783305911936,0.271469879527389,0.23461272684689,0.453599279540649,0.109042577429231,0.109042577429231,0.109042577429231], 1), + LabeledSample([0.269540789063922,0.0742425940934737,0.217552306967554,0.208824725336092,0.355277219636782,0.339660238719292,0.28494135945769,-0.0599238355949813,0.421647315225907,0.249123651029931,0.17789873992421,0.269695620986192,0.347813079058712,0.114482784095866,0.114482784095866,0.114482784095866], 1), + LabeledSample([0.404625688519658,0.0700267657079979,0.120472577645772,0.00219084178798748,0.379823180206969,0.402241148897651,0.321584301076104,0.0192020687698115,0.300130523389213,0.259466069761067,0.149793101053706,0.336182443721132,0.273109601918351,0.114834162473097,0.114834162473097,0.114834162473097], 1), + LabeledSample([0.227072284249524,0.115957953910278,0.37165420977739,0.0283164248068712,0.202551129726376,0.369900783939047,0.378352967053039,0.0898969763675667,0.202299134157373,0.243962099768371,0.256021152092969,0.238164818457677,0.447570516715675,0.121031883725162,0.121031883725162,0.121031883725162], 1), + LabeledSample([0.290513586918522,0.0288038623450319,0.232722173051494,0.021033401910653,0.251796587067657,0.434955256846487,0.412889359036101,-0.0703239873536929,0.290647100154398,0.27851991038108,0.109312082995173,0.223464411693962,0.414532651593511,0.110132685819811,0.110132685819811,0.110132685819811], 1), + LabeledSample([0.0273732600379447,-0.02558983433752,0.146982261055396,-0.0171273637178906,0.245008029292831,0.145781322353099,0.0150027880856774,0.690117179315347,-0.235663375971501,0.0411885264630709,0.504871112162429,-0.0657043501474764,0.171917154578638,0.155916290692338,0.155916290692338,0.155916290692338], 0), + LabeledSample([0.14521829713485,0.0343145533427399,-0.0882700952843158,0.046881072753417,0.294417347555922,0.173933905048393,0.11006649526536,0.656276502917861,-0.195721607174358,0.371766803766992,0.311380637843488,-0.117177743753181,0.0182118464611618,0.200382095730729,0.200382095730729,0.200382095730729], 0), + LabeledSample([0.486150877899874,0.00559857896218937,-0.185459353006127,0.142963157333992,0.163338134268417,0.199595799090423,0.207347908114734,0.163595452807656,-0.132850998237727,0.115223299672758,0.593717710005564,0.227279405422781,0.168328497429296,0.190701302816799,0.190701302816799,0.190701302816799], 0), + 
LabeledSample([0.0347134978427943,-0.0176692432854786,0.0428663618372316,0.166147230327836,0.0199498801884673,0.587805806576238,0.414442262124174,-0.089951397936977,0.310786812212336,0.14386213316866,0.429791189469868,0.285833283598367,-0.00578702083956967,0.140870696057391,0.140870696057391,0.140870696057391], 0), + LabeledSample([0.0386819482458521,-0.00349076506069984,0.381811856855455,0.124425737980637,0.198577221607227,0.276458187744415,0.361810145149105,0.225008209783446,0.430255529024555,0.175415277085379,0.449550455932021,0.135796133960852,0.171703541737192,0.156975047542789,0.156975047542789,0.156975047542789], 0), + LabeledSample([0.20899462334405,-0.0376063196642258,-0.287590562504126,-0.072418354993873,-0.104348662777656,0.404167598140499,0.410032520012738,0.0224411784874926,0.4361911298395,0.210242686845805,0.325742509559517,0.357264669291933,0.0428954946477981,0.134205015481038,0.134205015481038,0.134205015481038], 0), + LabeledSample([0.046621360661699,-0.0139688049328171,-0.183993426989613,0.271925965129567,-0.147104300632575,0.202242593318163,0.261960196367961,0.0534138805587944,0.0175423400165548,0.221129648306528,0.459211673437115,0.619552478506155,0.0671293292709748,0.189193942866217,0.189193942866217,0.189193942866217], 0), + LabeledSample([0.314015665359685,-0.0592175725353615,0.218693323692968,0.0511307060843809,0.398061338436536,0.320739392818663,0.0628993958736532,0.560252330340949,-0.237506347153332,0.0319676779103333,0.288240416391407,0.0175050328255241,0.225444912746704,0.15982819660882,0.15982819660882,0.15982819660882], 0), + LabeledSample([0.0235926642263124,0.111245261182021,0.128500316085317,0.401319090478726,0.400095170724106,-0.267283460172638,-0.000649706679643489,0.322976171567117,0.26186531561982,0.0277197647424217,0.246130186898388,-0.0259347325599058,0.436765374770623,0.225321555758923,0.225321555758923,0.225321555758923], 0), + LabeledSample([0.327691388188431,0.0414153506641207,0.342641752798106,0.426723895901174,0.00448419769970425,0.334360409171912,0.295483669866123,-0.0463655278288402,0.25051461407531,0.038010598277653,0.445600271509613,0.297644597218037,-0.00965967754349107,0.113551047440284,0.113551047440284,0.113551047440284], 0), + LabeledSample([0.422832528521326,0.105823154490609,0.138370598941533,0.630949920317877,0.0268473229561604,0.105291751147652,0.219543560563568,0.0535214977909612,0.0127504537131134,0.107245196764506,0.294587317177946,0.349815971415001,0.0355757901953409,0.18957512708764,0.18957512708764,0.18957512708764], 0), + LabeledSample([0.116234412828698,0.0493659607663372,0.228639262369261,0.510574000617481,0.471285231806449,0.322077243645769,0.228602419449592,-0.0352948257915518,0.21050837692503,0.0272710878325413,0.312245752193203,0.28658980023543,0.211279670444006,0.0864384517016141,0.0864384517016141,0.0864384517016141], 0), + LabeledSample([-0.230097079442458,0.191575291088469,-0.221273190981361,-0.024993826160744,0.264590786561978,-0.0107999042204516,0.176518708860926,0.247567524019548,0.0848265496512535,0.137473935828681,0.708369840105813,0.122490218181156,-0.00134001086000318,0.227527407567209,0.227527407567209,0.227527407567209], 0), + LabeledSample([0.286238576503558,-0.0853526847535509,-0.422558143311451,-0.0222125055212697,0.00798531641040375,0.143913549631955,0.288568394280582,-0.0127390882156346,0.235003191664602,0.225961949494622,0.579090368332931,0.249251651644526,-0.0332123980409943,0.202208087882306,0.202208087882306,0.202208087882306], 0), + 
LabeledSample([-0.138505618757149,0.429011756390478,0.135357513344437,0.149953928005861,0.539033862170768,0.0396351468199451,0.0843831548165318,0.424865680367432,-0.0272934605640143,-0.0267253148275226,0.285874188645398,0.262900953134214,0.0621739904835329,0.200026426154027,0.200026426154027,0.200026426154027], 0), + LabeledSample([0.0224877987223024,-0.0391969361451446,-0.104125007708806,-0.0801276173941065,0.659130786498865,0.0768619382257438,0.150151104777339,0.154114867140806,0.572869061092601,0.0445206947102069,0.26295955953503,0.113423019904689,0.180675371790576,0.128089024024065,0.128089024024065,0.128089024024065], 0), + LabeledSample([-0.0680713313894235,-0.0659572584062495,-0.0910223567594807,0.196293086038722,0.00657243295002523,0.293110509849117,0.291706321935055,0.104459472373553,0.180709201308787,-0.038253343288553,0.673873365895924,0.422659657301139,-0.110795904520421,0.166430361836103,0.166430361836103,0.166430361836103], 0), + LabeledSample([0.154066808923888,0.133666304199433,0.0919500790875897,0.194442098264837,0.00702220610633866,0.225818299748987,0.379643742496216,0.029734242478354,0.347024253511723,0.118848713104271,0.475971046401783,0.458845965840934,0.222663411373826,0.17781973769107,0.17781973769107,0.17781973769107], 0), + LabeledSample([-0.0412832096727263,-0.0193723491657681,0.334583955863749,0.514041914199186,-0.120088975443408,0.127166135539843,0.122010212151076,0.274722145723124,0.155901468269603,-0.065226883999393,0.557923375925209,0.266075448964717,0.136329230955618,0.154448963498702,0.154448963498702,0.154448963498702], 0), + LabeledSample([0.209152318262746,0.489509227021142,0.166558864454062,0.386824290404889,-0.0090287938267186,0.00603222107702054,0.115910607692672,0.60482754083883,0.226248773221092,0.179573248602189,0.0981144078333417,0.0393351999435281,0.0543152687428254,0.144144262359866,0.144144262359866,0.144144262359866], 0), + LabeledSample([0.146186351364834,-0.0599352177829581,0.137174545434296,0.140890443318893,0.256937704693468,0.238665823116693,0.24425412536107,0.117957710687414,0.218416932214424,-0.0287272780306666,0.585109848781112,0.492626674133014,0.0170807183110501,0.187936469761787,0.187936469761787,0.187936469761787], 0), + LabeledSample([-0.0212751800674024,0.0836439194336721,0.186747903160102,0.476487066856997,0.0758988721598699,0.0562669129630701,0.160218896806228,0.359848645202163,0.240032469855216,-0.0443743970083499,0.633126267164003,0.132720714645898,0.048933326900439,0.169416457441109,0.169416457441109,0.169416457441109], 0), + LabeledSample([-0.135415771949136,0.0751572759773452,0.362094316487182,0.551101202585419,0.0320765927245714,0.0848446078532958,0.1095484360509,0.234077186184729,0.112730289622787,-0.0218042564883484,0.516421980073012,0.35838738679105,0.0767953381865317,0.131598341625362,0.131598341625362,0.131598341625362], 0), + LabeledSample([-0.188460509151474,0.193401975555393,0.342146668362078,0.413536016302811,-0.0296656431400048,0.128142138561582,0.170273453960699,0.48605573213843,0.15881824790758,-0.00672874339900442,0.302244543532862,0.321206300340771,0.198525603613385,0.18004820867773,0.18004820867773,0.18004820867773], 0), + LabeledSample([-0.053714274484023,0.156242047612459,0.232205043379324,0.193832368392874,-0.0946801554375718,-0.0140257551300867,0.135988244942012,0.635024922675372,0.325654714375167,-0.0590818970692274,0.437621371413818,0.161532484103856,0.0447833885724345,0.200956129257143,0.200956129257143,0.200956129257143], 0), + 
LabeledSample([-0.0742634161790091,0.0662236988168258,0.306399592365841,0.448243013125216,0.00717029200860015,-0.0734859433802067,0.058602185896295,0.53196312467187,0.289615015487018,0.0864075791618671,0.361426640278499,0.286843185709196,0.0165020653068763,0.181569641339172,0.181569641339172,0.181569641339172], 0), + LabeledSample([0.151644500925181,0.0728178984707065,0.155936965367273,0.356144317534378,-0.106893503746568,-0.104609511219261,0.115619398241339,0.717997739810603,0.292668707947211,0.0229389859916003,0.266486361251246,0.0699209184017578,0.0538563063648312,0.186460745940225,0.186460745940225,0.186460745940225], 0), + LabeledSample([0.017251058258794,0.34061380107735,0.102725067313048,0.12351280000404,0.0401586689477961,0.301198241391264,0.286439164137179,0.0085850903260407,0.413825461579023,-0.074865004679771,0.430726677111447,0.47896687514064,-0.10098443502038,0.164756097406929,0.164756097406929,0.164756097406929], 0), + LabeledSample([0.0384095923980801,0.0342440150682099,0.0468913629984613,0.10695304663162,0.707428075964548,0.175903871921294,0.16022417436721,0.0777925992961381,0.525640169223502,-0.0150248996627851,0.242728786479052,0.139555500489966,0.202504809321555,0.090681921659038,0.090681921659038,0.090681921659038], 0), + LabeledSample([-0.102766049465399,0.158724029733518,0.467244884595389,0.310654099786137,0.681122886039951,-0.00962012483589027,-0.00625183963419078,-0.167340922687806,0.195285550280335,-0.0316805383269013,0.1883920491779,0.107978882227297,0.118631768157831,0.137833794477422,0.137833794477422,0.137833794477422], 0), + LabeledSample([0.019453115347372,0.043797394473083,-0.121376435441619,-0.0204086342530653,-0.0116371676846372,0.381129313559749,0.388762239761525,0.0952229866709675,0.362572432076403,0.00497650745663331,0.578417281517633,0.327650151753602,-0.00109418187359779,0.18578682646359,0.18578682646359,0.18578682646359], 0), + LabeledSample([0.0342405440321611,-0.0281824199791365,-0.00207123863927224,0.134025669553407,0.0338688736281274,0.586002464956269,0.536669558217335,-0.00875392381573298,0.334857918368746,0.146359507633643,0.267924904020169,0.25356443636761,0.140986680270676,0.138951404235846,0.138951404235846,0.138951404235846], 0), + LabeledSample([-0.0459776013701822,0.189206877746516,-0.368607134739602,0.0919901953133355,0.200031712125804,0.229964841814475,0.279570519661187,0.0287630387813438,0.173629318503823,0.0487512267493663,0.578448189961138,0.380632750540371,0.212363975757494,0.172011646673697,0.172011646673697,0.172011646673697], 0), + LabeledSample([0.171749946888126,0.0382054996412482,-0.283403382645723,0.253363284763606,0.0640260116010568,-0.106311359911027,0.125687986849022,0.134631138413371,0.231702239325847,-0.0688011193206196,0.506487555546021,0.479340403307816,0.153286206420282,0.262674936204522,0.262674936204522,0.262674936204522], 0), + LabeledSample([0.0336575027844327,0.274658266727067,0.330919770580883,0.355063482285362,0.220291637262679,0.361125499634068,0.268237175708898,0.189478160002826,0.135870102624656,-0.00650795237187516,0.0750829255153591,0.53469603668777,0.00663460638019215,0.174140266203267,0.174140266203267,0.174140266203267], 0), + LabeledSample([-0.17022115141536,0.148644983823566,-0.277201667417559,0.326078507065425,0.00923541000562605,0.453646179918048,0.20461185897608,0.335220303446187,0.247972853926978,-0.119771170340383,0.305058603820466,0.269169352595279,-0.0147508749961993,0.233863569340058,0.233863569340058,0.233863569340058], 0), + 
LabeledSample([0.0484946781518587,0.246997373535612,0.128767048138348,0.357379244643778,0.0648542975285742,0.0157552855302677,0.167648001620963,0.690742807613556,0.153497250272026,-0.0146978241744509,0.116595475501532,0.420845818394984,-0.072810427233968,0.144763486260763,0.144763486260763,0.144763486260763], 0), + LabeledSample([0.00724477043421416,0.138798808762658,0.0195824755550168,0.252432476912075,-0.122697772506,0.00895679579556293,0.305437969275691,0.306789523059073,0.341389863690145,0.0743914115188658,0.359287780985842,0.565392249180727,0.0759412712591187,0.214029079268603,0.214029079268603,0.214029079268603], 0), + LabeledSample([0.170557475573451,0.132738292033316,0.185573118907713,0.528272513717373,-0.0324344546741652,-0.110439744394134,0.205675203400676,0.531421231582964,0.313992645721549,0.0557916662885854,0.183107137106059,0.218531534014374,0.0507963922157862,0.196852818459459,0.196852818459459,0.196852818459459], 0), + LabeledSample([-0.00371590736778159,0.0155916049981286,-0.166812891661853,0.227158091846741,0.125624617211269,0.31339598835668,0.277583346796089,0.320412189049738,0.527899928421857,-0.0195546380052403,0.155123798558197,0.449788649460149,-0.0994118874493321,0.192599771000564,0.192599771000564,0.192599771000564], 0), + LabeledSample([-0.145401127281845,0.345107148052225,-0.160194422185549,0.148460286249223,0.232561585933487,0.368208351419031,0.234862362090247,-0.00670796080071491,0.69852658030737,0.0028520716799073,0.0127007121252476,0.202998608065508,0.0621348328391241,0.106475746469619,0.106475746469619,0.106475746469619], 0), + LabeledSample([0.107473565308119,0.325572766950317,0.0750219250582412,0.427881638813811,0.0511510625860875,0.430501117120413,0.370786722414011,0.0350908396323849,0.115642656346118,-0.115553062312517,0.221380050659666,0.387235464354543,-0.0963171493402314,0.209853804189988,0.209853804189988,0.209853804189988], 0), + LabeledSample([-0.105163594933397,0.236247321829009,0.548103393579171,0.202874910831642,0.223862494933951,0.0334263541064524,0.179916507439689,0.514487206885038,0.014899877628873,0.0761741154009069,0.392506736796903,0.160552241706305,0.0985266980933294,0.12607261803061,0.12607261803061,0.12607261803061], 0), + LabeledSample([-0.260684457823998,-0.112668045693411,0.378814930734517,0.365521021903169,0.0436961696223554,0.339738773764387,0.250756320579299,0.360140958998214,0.199214292163835,0.0255047188665542,0.412757961642129,0.15874309551013,0.00367568909960073,0.179269148286986,0.179269148286986,0.179269148286986], 0), + LabeledSample([-0.241748520287807,0.0414482707742813,0.058763595432437,0.256128316868812,-0.00711817390653469,0.210290393383608,0.184701209154332,0.359108538645612,0.291225483966834,-0.0698659966847082,0.694052272792579,0.230583568583409,-0.0111670343359772,0.113641306557511,0.113641306557511,0.113641306557511], 0), + LabeledSample([-0.153882847897414,0.0662016195816666,-0.12687324761113,0.412896588533035,0.00834896945150008,0.164629397167468,0.1729995904174,0.132694992919367,0.283389025047269,-0.123195679919954,0.288965585286903,0.627566808270024,0.0610644447721535,0.211416688057106,0.211416688057106,0.211416688057106], 0), + LabeledSample([0.0591785841682163,0.0872184373644866,0.0819503684103717,0.474073740755973,0.403890028673706,0.145450761132863,0.267110751972313,0.192215974310117,0.27278162753235,-0.0904730522376757,0.362665772366751,0.390910938027966,-0.0655387164868738,0.176656459691106,0.176656459691106,0.176656459691106], 0), + 
LabeledSample([0.224889272955833,0.669095465403597,-0.153846675880603,-0.0258682875936601,-0.134999621099271,-0.000661472488168805,0.0760044874046187,0.50018761028079,-0.104088082113815,0.0818499974610029,-0.0453545371648244,0.117158252403706,-0.0593961618189751,0.235487931160887,0.235487931160887,0.235487931160887], 0), + LabeledSample([-0.209103241158248,0.228669181795995,0.255088683830861,0.243250541631366,0.160349228073623,0.428236250356188,0.37837723230132,0.129677256434085,0.318097577546011,0.0379932474265926,0.0651760786736684,0.467232175843078,0.130104265959122,0.151163258655871,0.151163258655871,0.151163258655871], 0), + LabeledSample([-0.0949455004501372,0.0847859117075642,0.479085360309945,0.420399017918024,0.272405231651456,0.276274767241364,0.458999158117131,0.206476799663682,0.176254495135719,0.16103521244277,0.0992290020450245,0.29587858257189,0.0125324001646888,0.0798910778622389,0.0798910778622389,0.0798910778622389], 0), + LabeledSample([0.0390776074332029,0.481914025011933,0.389235123540663,0.513576685188268,0.195221567237419,0.148139913148615,0.182988505577128,0.247774833808009,0.209830507186548,-0.0582455614924274,0.137611964957276,0.296245087249184,-0.0330146696405289,0.116652195711977,0.116652195711977,0.116652195711977], 0), + LabeledSample([0.175361217812057,0.712851209890314,0.0174872350745221,0.272389026452382,0.00477267472932331,0.26681797174273,0.278559739216107,0.0758548756338079,0.309714392479613,-0.0200243808111048,-0.000661039907090557,0.301985034467809,-0.0262299830144248,0.120856003882741,0.120856003882741,0.120856003882741], 0), + LabeledSample([-0.0515550360938761,0.217638869678391,0.0646674386583323,0.368825641431328,-0.0113304374665257,0.391278781291545,0.416931222704819,0.25928777642477,0.159560687159559,-0.0114024132991685,0.14569242322842,0.518482157017735,-0.0408508589759248,0.180889893176408,0.180889893176408,0.180889893176408], 0), + LabeledSample([0.0590584496010962,0.0653911498014063,0.166756993142791,0.377389338957649,0.00661244171476956,0.414536345971713,0.504472951325259,0.278561769633665,0.326774465916968,0.116210711360602,-0.0426937852259151,0.329492903733875,-0.0569648157818169,0.167443483345846,0.167443483345846,0.167443483345846], 0), + LabeledSample([-0.0917905323242946,0.136856969328066,0.423421071914423,0.618451685458719,0.0766586823653196,0.130882678776999,0.202666553402918,0.452561937469748,0.226369730901658,0.0106885068356884,0.175300271549197,0.135271218254247,0.0189534851807265,0.117528275878727,0.117528275878727,0.117528275878727], 0), + LabeledSample([0.0410725034645433,0.0865908543801398,0.174860032199159,0.590552204556892,0.0406266742761109,0.21911007344103,0.336973706175648,0.296470940048156,0.380450847672745,-0.0789455576012649,0.165434239058747,0.30756133466462,-0.0640356262452383,0.16667614937775,0.16667614937775,0.16667614937775], 0), + LabeledSample([0.231247021143954,0.0328575941432753,0.239771237867592,0.155559724420754,0.771104178545808,-0.0561824850255899,0.0669726177113016,-0.0847287418745489,-0.0335975478083005,0.165314845980509,0.037694955276207,-0.248462360894268,0.183160019410365,0.20750410571554,0.20750410571554,0.20750410571554], 0), + LabeledSample([0.241553862856675,0.477659290517776,0.332149184931846,0.338049550466755,0.394475485720108,-0.155382829819071,0.0588793688880362,-0.0132346547036292,-0.0928549679630124,0.342575719203705,0.0119545925013105,-0.195772238397477,0.0930197904312452,0.210074235777643,0.210074235777643,0.210074235777643], 0), + 
LabeledSample([0.195334098839262,0.265936942134869,0.301061082717328,0.633735089605007,0.240875720732619,-0.204614328497058,0.0183220768801773,0.0537577549439433,-0.0841640397111417,0.347161086293148,-0.0841816583990839,-0.200778379378044,0.109441334344513,0.190411958672153,0.190411958672153,0.190411958672153], 0), + LabeledSample([0.154448016907517,0.556854469068016,0.254312229828219,0.418896273907283,0.386968433915166,0.0285279899342927,0.0468296875626629,-0.161466370113012,-0.0774192519320793,0.255390569404737,0.0569494729175185,-0.222545610887232,0.139527712855628,0.185859652149587,0.185859652149587,0.185859652149587], 0), + LabeledSample([0.107628604769279,0.261746278972036,0.0851180912768023,0.158772315991637,0.140988491850029,-0.00413784948643965,-0.0843738206441276,0.728586430979596,-0.026652050091825,0.447966709849224,-0.000900345468261859,-0.0996191659016783,0.192361642089446,0.164607543721497,0.164607543721497,0.164607543721497], 0), + LabeledSample([0.00486083688270077,0.64696412497406,0.334004131433736,0.292796584476603,0.0496680469899061,-0.0805643593285377,-0.112236101061606,0.470313172913834,-0.0744431293863573,0.0913718514970689,0.017129180316594,-0.177811231073042,0.183606006071121,0.143601574670483,0.143601574670483,0.143601574670483], 0), + LabeledSample([0.0593012658050308,0.618282924898504,0.319860123970287,0.420597881670929,0.118722187593532,0.039670437030364,-0.066875474833392,0.495384719941719,0.0274768048222377,0.153791027572778,0.0622186086857226,-0.0425934013405588,0.0421731300357924,0.111920917655756,0.111920917655756,0.111920917655756], 0), + LabeledSample([0.35706949199178,0.487907587838099,0.0742654982178638,0.223215994867022,0.0390214609130473,-0.00402430294131187,-0.122856975826827,0.616454988141533,-0.0503794065110105,0.29187878481142,0.0790110377216966,-0.0184265802938717,0.106098116652602,0.160090555379413,0.160090555379413,0.160090555379413], 0), + LabeledSample([0.199679080715689,0.329178457633655,0.5122675888719,0.328753885712996,0.171034892353202,-0.0328548041171417,-0.11917098819282,0.528070651417679,-0.104113418370725,0.180177634599645,0.0393474158856868,0.14277539827214,0.121042935048185,0.161236770407746,0.161236770407746,0.161236770407746], 0), + LabeledSample([0.399234374525672,0.343140964574117,0.262499129603519,0.321200298372581,0.0690376898799216,-0.103069220489572,-0.147528123918692,0.28611202467628,-0.184796038719915,0.351113212604546,-0.0384432817619214,0.241965234417974,0.30789080407244,0.199603599883522,0.199603599883522,0.199603599883522], 0), + LabeledSample([0.326297251446263,0.358072912503018,0.469179487893646,0.44486497839985,0.166165578623474,-0.0259256728784585,-0.101564268085728,0.418637721491455,-0.165035262442682,0.13675622140151,0.141824742886277,0.0535821051312442,0.0569851850404429,0.14288941736166,0.14288941736166,0.14288941736166], 0), + LabeledSample([0.359120502088139,0.700805409310182,0.191928580932408,0.234848464278066,0.0951917123491453,0.120161856543908,-0.0458786085683259,0.37718388873341,0.00609959082291796,0.144359407159952,0.163061132284603,0.0517465740033583,0.0774619267888505,0.145941953150503,0.145941953150503,0.145941953150503], 0), + LabeledSample([0.291195303142619,0.534655572500114,0.0444255612049878,0.360745440250476,0.296093533980492,-0.0518541691097636,-0.118242939814779,0.375801337996739,0.133539219783322,0.129603302294616,-0.144436062691931,-0.0709454867315089,0.311825750270044,0.176926972586679,0.176926972586679,0.176926972586679], 0), + 
LabeledSample([0.413805146571906,0.69664978134007,0.0958556630778741,0.227945149088853,-0.0812057308285977,-0.205986954415187,-0.136784207696217,0.251959916491281,-0.116715813654797,0.185556559428288,-0.133310433179743,-0.158041650685777,-0.00893465922483392,0.141652023876218,0.141652023876218,0.141652023876218], 0), + LabeledSample([0.158291913315117,0.61007562639357,0.280571854551608,0.31512409624496,0.00552145888671505,0.021460818836787,-0.0736463801656978,0.361354290332944,-0.0511199237943532,0.429838510746757,-0.166468560337299,-0.00467256799448173,0.120338758614208,0.13981708255778,0.13981708255778,0.13981708255778], 0), + LabeledSample([0.294612172286633,0.396460478068849,0.225195794572686,0.344173187654909,0.0996035097194735,0.101862847125905,-0.0566530274920627,0.359509762226072,0.0958157698038399,0.544901562229702,-0.172288930624776,-0.0799430591765561,0.148225261470161,0.15270584371661,0.15270584371661,0.15270584371661], 0), + LabeledSample([0.280112301373465,0.304198697282866,0.342831597197724,0.414689543234349,0.475740782606101,-0.0643383875261756,0.0966644031984621,-0.0365339289604027,0.0781791396244117,0.458991625305328,-0.109488356402631,-0.146646948787543,0.0387285214557376,0.124597563656019,0.124597563656019,0.124597563656019], 0), + LabeledSample([0.133853155365226,0.251509825278989,0.271201745583724,0.333640610110787,0.364197773553594,-0.0456447486371128,0.0646671540303931,-0.00851428995732382,0.0882401692122618,0.688613848871636,-0.211489063016041,-0.112147850666851,0.0301178737450273,0.13514768583147,0.13514768583147,0.13514768583147], 0), + LabeledSample([0.207271251693883,0.115500830559941,0.458273039159486,0.528782719588034,0.413047389853592,0.169025360544143,0.0486111900689069,0.0222561283164266,0.188576295848358,0.362216900330744,-0.100353981302315,-0.148498298059799,-0.00254032130971227,0.133098359648033,0.133098359648033,0.133098359648033], 0), + LabeledSample([0.217169576879935,0.328067883566794,0.115674032839654,0.118355240622035,0.15522485184217,0.0462327273246561,-0.0333619921336167,0.51486888362252,0.270710043500941,0.603172249037944,-0.130786950489933,-0.0917999683842816,0.132598808030938,0.122705052979048,0.122705052979048,0.122705052979048], 0), + LabeledSample([0.194334002827861,0.597116623596196,0.134983141140157,0.288389492055361,0.0881256370367682,0.0283641866903432,-0.0648397385418245,0.490166173851522,0.162535151452483,0.377869101053486,-0.0953653778297959,-0.0945519515505388,0.15193019485309,0.116817831188036,0.116817831188036,0.116817831188036], 0), + LabeledSample([0.382049935071895,0.500121566084578,0.197658523253673,0.186883314209102,0.0463584394822235,0.0505007692652614,-0.0364418533031411,0.361833015451648,0.189900157465683,0.52900592480994,-0.134500290724354,-0.0646934845425211,0.0298694084733732,0.13403274967237,0.13403274967237,0.13403274967237], 0), + LabeledSample([0.0454604271711028,0.272358209831245,0.334733475377532,0.540826974410863,0.136204453040618,0.0702829081012277,-0.0733016984179067,0.518419885844916,0.0406840934257617,0.348805231475169,-0.0444755533797833,-0.0350069750344801,0.232188724319112,0.117125189815264,0.117125189815264,0.117125189815264], 0), + LabeledSample([0.335674295704234,0.0524705458011824,0.285448708675593,0.337637839798896,0.113656453675324,0.234464885964899,0.0366300952166488,0.294254582587596,0.388100051183445,0.584600415570291,-0.0901586894505694,0.0153442661214017,0.0911612850972567,0.0898452857524493,0.0898452857524493,0.0898452857524493], 0), + 
LabeledSample([0.0350118075919132,0.513951347877797,0.225756494796119,0.312134977782114,0.0373141409575358,0.22859045944533,-0.0221155204801056,0.44851185516087,0.014194363756837,0.470631206937074,-0.163168980713227,-0.0926464538757241,0.0610516676999834,0.153086055970812,0.153086055970812,0.153086055970812], 0), + LabeledSample([0.381787211515927,0.387300689840662,0.343559965487082,0.237693597788473,0.32262439324557,0.0655486033329805,-0.0966255085017079,0.432761090491005,-0.0765729419362027,0.286364640913038,0.211104496257327,-0.0170015668142093,0.162868701584034,0.147710005340067,0.147710005340067,0.147710005340067], 0), + LabeledSample([0.155945886743743,0.374673392583335,0.360990966123492,0.351037530117177,0.296055870377592,0.00595063273222534,-0.0849647675134255,0.580279283002605,-0.0157815952502789,0.248303270443573,0.123396039021296,0.0678399740301173,0.0879828660502218,0.142194650164373,0.142194650164373,0.142194650164373], 0), + LabeledSample([0.247099012520518,0.548529233743003,0.251602210677838,0.184535531479979,0.398332162863828,-0.103062467167249,-0.0872299639676222,0.340301314244126,-0.0309767493029768,0.297259141024446,-0.0607147335296098,-0.0493683988668593,0.206743625250552,0.191317613379529,0.191317613379529,0.191317613379529], 0), + LabeledSample([0.368840866448451,0.561256609671349,0.116156562908135,0.372733535848356,0.0338547729259747,-0.108936188248542,-0.108556407075164,0.438906088118794,0.0517821615670243,0.335214628337197,0.042558536063148,-0.0358406749243028,0.0553916100892464,0.138893554294146,0.138893554294146,0.138893554294146], 0), + LabeledSample([0.252099424680132,0.372340119658059,0.308780202110509,0.338356586210174,0.302660291604557,0.0179180881380311,-0.0168642544568776,0.247953299406305,0.135669071925625,0.58581498194311,0.0867401818600983,-0.0754154369541946,0.136418841562815,0.116736217174449,0.116736217174449,0.116736217174449], 0), + LabeledSample([0.160514905930226,0.42513020108958,0.161366460219075,0.214464679117517,0.0374916013451409,-0.0519491424865503,-0.0788413692144921,0.273592916298358,-0.0131545583493555,0.730434903489431,-0.0104356432008383,-0.0396908376594602,0.17298227627428,0.15381411000428,0.15381411000428,0.15381411000428], 0), + LabeledSample([0.312208384295773,0.227953784866742,0.408105848561651,0.46286958294019,0.258655167682807,-0.0237030251637096,-0.0336696067073982,0.217333697465091,0.178439191727198,0.486277022560919,0.00743422653111901,-0.0203721388066285,0.184272635196624,0.130639288369846,0.130639288369846,0.130639288369846], 0), + LabeledSample([-0.0121138629979706,0.51476715301201,0.255794413329181,0.261091726026612,0.234823356715931,-0.188453187961464,-0.166750701853882,0.393757950236957,-0.154216849192256,0.375200433991031,-0.0978687227599553,-0.0209595193756307,0.0785463020933106,0.221370967831489,0.221370967831489,0.221370967831489], 0), + LabeledSample([0.15164440851627,0.238159903687439,0.149692397812484,0.221404230777602,0.00627075628692431,-0.0855405218580231,-0.115115238684532,0.428670482407951,-0.147011261148233,0.717394074369177,-0.159345062630204,-0.0798861946499534,0.028401848780963,0.158791157818479,0.158791157818479,0.158791157818479], 0), + LabeledSample([0.479353368202618,0.232098810359438,0.281487506769595,0.225726645614505,0.0771685321261895,0.0152666061652778,-0.0639566924507687,0.314093910034917,0.0844431404637428,0.615734357611464,-0.0970141128064776,-0.0476545173116696,0.142328018096663,0.140273378582059,0.140273378582059,0.140273378582059], 0), + 
LabeledSample([0.30805302990898,0.670729182344314,0.217166727339904,0.214458524080411,0.112708704299647,0.0127958405947736,-0.0685871682117966,0.371527606547604,0.0168889271955423,0.365220269768971,-0.0666459323845712,-0.0327394701982652,0.160667936689163,0.11757136934001,0.11757136934001,0.11757136934001], 0), + LabeledSample([0.220644600430142,0.47201243250483,0.100246722942253,0.192892834023419,0.42096181212692,-0.0110428333219897,-0.0563507871054082,0.254608441867765,0.105735328552683,0.564623622127601,-0.105333595654027,-0.0774396103688061,0.213900912573913,0.119869452979611,0.119869452979611,0.119869452979611], 0), + LabeledSample([0.21584998670774,0.228078314592705,0.184442119623237,0.208794925485675,0.455665917504659,0.00542990534202975,-0.0628333846009199,0.424952422931548,0.15079523110219,0.536249827391934,-0.105923900281999,-0.0679262243981786,0.234388707705015,0.129751494551881,0.129751494551881,0.129751494551881], 0), + LabeledSample([0.362999772634019,0.39983821564658,0.365781340492783,0.382311657116397,0.114459892044237,0.100888832191619,-0.0400313439069322,0.390656550007052,0.0951796929833985,0.439028538569296,-0.0813569403810565,-0.060895058821727,0.0620183007783841,0.107902797379616,0.107902797379616,0.107902797379616], 0), + LabeledSample([0.391844552693735,0.0224505345360546,0.128147149095695,0.00959245823497324,0.147402239960384,0.36111662558123,0.382155151656564,0.0127068479152497,0.218005687475965,0.324055673723351,0.222576956644604,0.364244640009045,0.389785541991087,0.121679173532834,0.121679173532834,0.121679173532834], 1), + LabeledSample([0.356888874277264,0.0720162195877543,0.136996189058879,-0.140909810519905,0.136677215809854,0.32796172094906,0.334588448755052,0.0813885300036874,0.383135189111009,0.242506953404048,0.26235074418908,0.164272964522333,0.51022872786026,0.0861361717713044,0.0861361717713044,0.0861361717713044], 1), + LabeledSample([0.356743024162847,0.299694889353212,0.311343800463449,-0.0332955728202061,0.365655578392927,0.281510298420893,0.372971600707102,-0.0625154967079762,0.168543951699275,0.190470770336807,0.190242024720102,0.330066123959551,0.26721886250174,0.126066127781679,0.126066127781679,0.126066127781679], 1), + LabeledSample([0.342818794892513,0.101052809398138,0.360430132766183,0.23819990844026,0.135958671633598,0.237320808409532,0.308254717340488,0.109542475248431,0.177557689560361,0.0866011436890782,0.336473881100679,0.468385700630219,0.281509738880536,0.132808149773745,0.132808149773745,0.132808149773745], 1), + LabeledSample([0.471825455298159,0.0805157625548287,0.102338403158557,0.0106143299846797,0.13783550627248,0.282993959641305,0.255288568929083,0.0787232630337918,0.283117434475593,0.174153574685115,0.272150764672242,0.413684819363966,0.424013222162737,0.134641493192199,0.134641493192199,0.134641493192199], 1), + LabeledSample([0.34641200164034,0.099744551278731,0.38776203448908,0.181916386282477,0.311546673657173,0.175216256169144,0.293174280061248,0.125937342360261,0.139917604026267,0.136174356784758,0.320610477987196,0.219965569994473,0.463069568846749,0.133283652542607,0.133283652542607,0.133283652542607], 1), + LabeledSample([0.404853350133791,0.366370427401772,0.233457215510936,-0.117646343734435,0.422400124731543,0.316834280568532,0.311952030312985,-0.0142353771929971,0.245689562716628,0.158495894279496,0.111287504351714,0.309881505730385,0.174043923126029,0.10490304431372,0.10490304431372,0.10490304431372], 1), + 
LabeledSample([0.25251240458689,0.100686161713017,0.0315578727081392,0.0718934503369092,0.291240948285929,0.427841821532188,0.412648557737393,0.116841462722393,0.314820284876223,0.209407419028952,0.141208499852881,0.368089079182708,0.330970427145695,0.141657365746273,0.141657365746273,0.141657365746273], 1), + LabeledSample([0.350364491958287,0.0688422581963099,0.109742569360303,0.0707531010098024,0.224015978099777,0.28190906228017,0.388953246261309,0.135993554782958,0.244051253148224,0.3726688048422,0.264003118925063,0.219311926596273,0.455200762754877,0.104203528797661,0.104203528797661,0.104203528797661], 1), + LabeledSample([0.397327283049911,0.0773527405250614,0.126236452283931,0.0330706981684607,0.408012616077325,0.260360655336735,0.364814158384692,-0.00206578487920926,0.163991130541285,0.253478466473427,0.165540214451542,0.30416373966438,0.433971873268533,0.132120542922879,0.132120542922879,0.132120542922879], 1), + LabeledSample([0.107297174901783,-0.0613762696733566,-0.632314571586212,-0.416852421444992,0.0508952503409911,0.064074731214031,-0.144536144592534,0.153765312304364,-0.303969426648252,-0.0375252361713336,0.388754164139066,-0.078967037248287,0.0668579337914334,0.186423454283019,0.186423454283019,0.186423454283019], 1), + LabeledSample([0.0337268344229458,-0.00184814410831371,-0.171994660095448,0.0487910037701937,0.715459227632856,0.00029799084750352,0.034974910385745,-0.0990313039755913,0.387197494235993,0.033304059568937,0.411585327770438,0.236608730976454,0.165595696405405,0.115678798001455,0.115678798001455,0.115678798001455], 1), + LabeledSample([-0.0223561749320937,-0.0207526232365009,0.176225433251552,0.326430295003409,0.219297804670973,0.590124842186138,0.236154622574307,-0.143389150319129,0.192871802948094,0.0710337180025261,0.249885838045267,0.317860451806615,0.343996729607254,0.146892033179513,0.146892033179513,0.146892033179513], 1), + LabeledSample([0.032885091939905,0.0461926134522565,0.34638380929578,0.203505193733194,0.264570166804038,0.0160341545388597,0.151445308394402,0.427311190150461,-0.0254549665189148,0.0510306773298033,0.692797615252923,0.0575883314039526,-0.0798332036067795,0.149101002314511,0.149101002314511,0.149101002314511], 0), + LabeledSample([0.142629866189693,0.410472062836346,0.207336198359131,0.378232988385718,0.183357859335752,0.316154994878092,0.273883001957771,0.322495142977759,0.249438841565115,0.0166674724899229,0.366212353243424,0.265767421147697,0.00611339840415559,0.122818414884191,0.122818414884191,0.122818414884191], 0), + LabeledSample([-0.0679233939259161,-0.072886846046031,0.477512324840591,0.174125713388414,0.155497662993969,0.17129901382616,0.318776243273762,0.0194230007106634,0.555783444679686,0.0737664378133595,0.0782223803712395,0.384101054511456,0.0667032731300068,0.185992205916374,0.185992205916374,0.185992205916374], 0), + LabeledSample([0.00999269298689842,0.0262030266834996,0.197155588375633,0.548367414525349,-0.232718880830273,0.151355729009093,0.127012275111873,0.411786294151836,0.109592670067503,-0.0518380454362711,0.363744240593521,0.381580392735396,0.15243777913074,0.164338283611199,0.164338283611199,0.164338283611199], 0), + LabeledSample([-0.131400682086754,0.171521987350065,0.191616648663646,0.162786759491588,0.210638981678084,0.534289546493504,0.311213752749673,-0.0862263866819605,0.103472561468339,0.0878652293863465,0.485065843433271,0.317875059263178,-0.102522755165598,0.173880513696912,0.173880513696912,0.173880513696912], 0), + 
LabeledSample([0.0506049151151188,0.287715578010986,0.0887170666404207,0.109273692933807,0.0318657512060172,0.306583832312144,0.338235987285259,0.334511582631667,0.477276067209907,-0.00128966778775066,0.519921454232635,0.183408801865957,-0.0302403142292243,0.116720585385637,0.116720585385637,0.116720585385637], 0), + LabeledSample([0.155537012760084,0.12469512047752,-0.0685853167040014,0.378983217011539,-0.0016059855847115,0.173834869196373,0.240415288132954,0.246327374233871,0.171038483773879,0.0635733414875313,0.381511309986817,0.572462983878879,0.223146044444717,0.188745574564891,0.188745574564891,0.188745574564891], 0), + LabeledSample([-0.106888248404423,0.0254982746554491,0.435809951544879,0.247866151735291,0.115539233295032,0.323105832423766,0.385789333857532,0.130580401233695,0.459264529749233,0.0247638879079171,0.20854914148463,0.334532005981271,0.169375895367498,0.138197555234402,0.138197555234402,0.138197555234402], 0), + LabeledSample([0.0100012070121382,0.0385909848160964,0.358005307896772,0.471274773272444,-0.0168340126254455,0.306861704693128,0.301875027533534,0.431885187577969,-0.024144046014302,0.0518235705697981,0.216614240137034,0.375408000771501,-0.0613295291494424,0.164478303953699,0.164478303953699,0.164478303953699], 0), + LabeledSample([-0.0156009038121407,0.536226598008622,-0.0510488844092566,0.262251476208149,0.0344260697722476,0.0761242352448198,0.163083120634928,0.366107933805052,0.353446929363601,-0.108649979874075,0.253047115727131,0.385416407008332,0.114718978391023,0.192142309706158,0.192142309706158,0.192142309706158], 0), + LabeledSample([-0.0370723732585497,0.462706141337283,0.194819679329056,0.264097858693995,-0.0344675209732017,0.315276508881407,0.351600930409003,-0.00185817840721325,0.514394278278426,0.030985137447176,0.0195438132713035,0.376880126062964,-0.0475330499513248,0.118842742281355,0.118842742281355,0.118842742281355], 0), + LabeledSample([-0.0016596349541218,0.631802778994405,0.258923690167821,0.431565727191915,-0.0777693312326763,0.107217718812087,0.165357588010325,0.407926583994051,0.0968775514246654,0.0247476673279324,0.068467607179257,0.172207746838557,0.111312979635777,0.16279792602384,0.16279792602384,0.16279792602384], 0), + LabeledSample([0.143710241385037,0.00813969645743141,0.166626922787307,0.125887427831217,-0.00148386915721329,0.0690924636334773,-0.132562337719547,0.813875842290254,0.0620432660245373,0.296664745393943,0.0286792231349211,-0.180653988476775,0.185389155342318,0.174393680319152,0.174393680319152,0.174393680319152], 0), + LabeledSample([0.194304705256292,0.30346843378612,0.374041666089729,0.48352572078128,0.205759387922832,0.170334766020302,-0.100580387509185,0.527373130970181,-0.0960922066168464,0.194750484480948,0.146211633441668,0.031454392555221,0.10170488371806,0.137823608782152,0.137823608782152,0.137823608782152], 0), + LabeledSample([0.0570260846145295,0.52764742101151,0.111869455738976,0.180967382649137,0.397178358074248,-0.193581548022407,-0.0718130900746043,0.148619617280605,0.014552586962639,0.503530729139833,-0.0748446558522674,-0.0107044561285139,0.341924057887159,0.157289053900933,0.157289053900933,0.157289053900933], 0), + LabeledSample([0.248098725379752,0.456746224327013,0.201739035375683,0.329290659490806,0.353941083012747,-0.155688188640652,0.0634239539879236,0.208825572195049,0.0125037152349941,0.520052128663407,-0.13353266010295,-0.188043841093254,0.0704360793068273,0.135144187394862,0.135144187394862,0.135144187394862], 0), + 
LabeledSample([0.154461249938759,0.0178069859154663,0.0362860191116435,0.292137716451667,0.273737481826492,-0.157722750938907,-0.123811269772362,0.623254105885853,0.116416845998361,0.467427306225011,-0.150509693940165,-0.155859335652357,0.164324900252044,0.162881127264595,0.162881127264595,0.162881127264595], 0), + LabeledSample([0.283967248673866,0.0653052200973657,0.31906371675043,0.319600990343548,0.0409193502319166,0.21981005179708,0.00324997177552983,0.413419912003247,0.317297662817938,0.571709617497126,-0.110212961327936,-0.0560612150021478,0.100443678932404,0.111543056863857,0.111543056863857,0.111543056863857], 0), + LabeledSample([0.437022295249671,0.277016772723886,0.170318463731936,0.376347873321372,0.0654029814959019,-0.18590177141098,-0.0863664388167373,0.338687920702156,-0.0242372506105056,0.560202129237804,-0.0221037853452648,-0.0993942479877979,0.123995050925897,0.141968301419256,0.141968301419256,0.141968301419256], 0), + LabeledSample([0.273452627665534,0.484246176255015,0.447645208017183,0.447862902118889,0.287256166753207,0.0390992590174893,-0.0208585770470468,0.0801713198810822,0.00235790979876691,0.382106195445461,-0.0395631261301076,-0.0257173942959117,0.108602367051486,0.11375808821732,0.11375808821732,0.11375808821732], 0), + LabeledSample([0.323768883858918,0.456898831708986,0.280009612342141,0.414792798825381,0.213720336482119,-0.0173248427920594,-0.0676281609721251,0.35366736131149,0.0994909525019752,0.364562841125483,-0.020970585691538,-0.12622312846499,0.216164946371533,0.13468998110011,0.13468998110011,0.13468998110011], 0) + + ]; + } + +} diff --git a/MachineLearning/src/Runtime/Features.qs b/MachineLearning/src/Features.qs similarity index 100% rename from MachineLearning/src/Runtime/Features.qs rename to MachineLearning/src/Features.qs diff --git a/MachineLearning/src/Runtime/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs similarity index 65% rename from MachineLearning/src/Runtime/GradientEstimation.qs rename to MachineLearning/src/GradientEstimation.qs index 92b340a6e19..26fd7d039e4 100644 --- a/MachineLearning/src/Runtime/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -13,40 +13,25 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Characterization; // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. 
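The hunk below replaces the hand-rolled Hadamard-test circuit with a call to EstimateRealOverlapBetweenStates, which the gradient estimator uses to get the real part of an overlap between two parametrized states. The idea behind the removed circuit is that, with the auxiliary qubit prepared in |+>, applying one state preparation controlled on the auxiliary being |1> and the other controlled on it being |0> makes the probability of measuring Zero on the auxiliary equal to (1 + Re<u|v>)/2. The following Q# sketch illustrates only that idea; it is not the library's implementation, and the namespace, operation name EstimateRealOverlap, and its signature are illustrative assumptions.

    namespace MachineLearning.Illustrations {
        open Microsoft.Quantum.Intrinsic;
        open Microsoft.Quantum.Canon;
        open Microsoft.Quantum.Measurement;
        open Microsoft.Quantum.Convert;

        /// Estimates Re<u|v> for |u> = prepU|0...0> and |v> = prepV|0...0>
        /// via a Hadamard test: P(Zero on the auxiliary qubit) = (1 + Re<u|v>) / 2.
        operation EstimateRealOverlap(
            prepU : (Qubit[] => Unit is Adj + Ctl),
            prepV : (Qubit[] => Unit is Adj + Ctl),
            nQubits : Int,
            nShots : Int
        ) : Double {
            mutable nZeros = 0;
            for (idxShot in 1 .. nShots) {
                using ((aux, register) = (Qubit(), Qubit[nQubits])) {
                    within {
                        H(aux);
                    } apply {
                        // Apply prepU when the auxiliary qubit is |1>...
                        Controlled prepU([aux], register);
                        // ...and prepV when it is |0>.
                        within { X(aux); } apply {
                            Controlled prepV([aux], register);
                        }
                    }
                    if (MResetZ(aux) == Zero) { set nZeros += 1; }
                    ResetAll(register);
                }
            }
            // P(Zero) = (1 + Re<u|v>) / 2, so rescale the frequency to the overlap.
            return 2.0 * IntAsDouble(nZeros) / IntAsDouble(nShots) - 1.0;
        }
    }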
-    operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl {
-        let L = Length(reg) - 1;
-        let g1 = _ApplyGates(param1, gates1, _);
-        let g2 = _ApplyGates(param2, gates2, _);
-
-        enc(LittleEndian(reg[0..(L-1)]));
-        within {
-            H(Tail(reg));
-        } apply {
-            (Controlled g1) ([reg[L]], reg[0..(L-1)]);
-            within {
-                X(Tail(reg));
-            } apply {
-                (Controlled g2) ([reg[L]], reg[0..(L-1)]);
-                (Controlled Z) ([reg[L]], reg[(L-1)]);
-            }
-        }
-    }
-
-    operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) {
-        return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _);
+    operation _ApplyLEOperationToRawRegister(op : (LittleEndian => Unit is Adj), target : Qubit[]) : Unit is Adj {
+        op(LittleEndian(target));
     }
-    operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double {
-        return 1.0 - EstimateFrequencyA(
-            endToEndHTcircuit(enc2,param1,gates1,param2,gates2),
-            _TailMeasurement(nQubits),
-            nQubits,
-            nMeasurements
+    operation _EstimateDerivativeWithParameterShift(
+        inputEncoder : StateGenerator,
+        gates : GateSequence,
+        parameters : (Double[], Double[]),
+        nQubits : Int,
+        nMeasurements : Int
+    ) : Double {
+        return EstimateRealOverlapBetweenStates(
+            _ApplyLEOperationToRawRegister(inputEncoder::Apply, _),
+            _ApplyGates(Fst(parameters), gates, _),
+            _ApplyGates(Snd(parameters), gates, _),
+            nQubits, nMeasurements
         );
     }
-
-
     /// # Summary
     /// polymorphic classical/quantum gradient estimator
     ///
@@ -96,9 +81,9 @@
                 w/ gate::Index <- (param[gate::Index] + PI());
             // NB: This is the *antiderivative* of the bracket
-            let newDer = 2.0 * HardamardTestPhysical(
-                sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements
-            ) - 1.0;
+            let newDer = _EstimateDerivativeWithParameterShift(
+                sg, gates, (param, paramShift), nQubits, nMeasurements
+            );
             if (IsEmpty(gate::Span::ControlIndices)) {
                 //uncontrolled gate
                 set grad w/= gate::Index <- grad[gate::Index] + newDer;
@@ -108,10 +93,9 @@
                     w/ gate::Index <- (param[gate::Index] + 3.0 * PI());
                 //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta)
                 // NB: This is the *antiderivative* of the bracket
-                let newDer1 = 2.0 * HardamardTestPhysical(
-                    sg::Apply, param, gates, controlledShift, gates, nQubits + 1,
-                    nMeasurements
-                ) - 1.0;
+                let newDer1 = _EstimateDerivativeWithParameterShift(
+                    sg, gates, (param, controlledShift), nQubits, nMeasurements
+                );
                 set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1));
             }
         }
diff --git a/MachineLearning/src/Runtime/InputEncoding.qs b/MachineLearning/src/InputEncoding.qs
similarity index 76%
rename from MachineLearning/src/Runtime/InputEncoding.qs
rename to MachineLearning/src/InputEncoding.qs
index 730fa2d98e8..1eb4b1db7d3 100644
--- a/MachineLearning/src/Runtime/InputEncoding.qs
+++ b/MachineLearning/src/InputEncoding.qs
@@ -64,23 +64,24 @@ namespace Microsoft.Quantum.MachineLearning {
         }
     }
-    function NoisyInputEncoder(tolerance: Double,coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) {
+    function ApproximateInputEncoder(tolerance : Double,coefficients : Double[])
+    : (LittleEndian => Unit is Adj + Ctl) {
         //First quantize the coefficients: for a coefficient x, find the integer y minimizing |x - y*tolerance|, so that |x - y*tolerance| <= tolerance/2
         let nCoefficients = Length(coefficients);
-        mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients];
+        mutable complexCoefficients = new ComplexPolar[Length(coefficients)];
         mutable cNegative = 0;
-        for (idx in 0 .. nCoefficients - 1) {
-            mutable coef = coefficients[idx];
+        for ((idx, coef) in Enumerated(coefficients)) {
+            mutable magnitude = coef;
             if (tolerance > 1E-9) {
-                set coef = tolerance * IntAsDouble(Round(coefficients[idx] / tolerance)); //quantization
+                set magnitude = tolerance * IntAsDouble(Round(coefficients[idx] / tolerance)); //quantization
             }
             mutable ang = 0.0;
-            if (coef < 0.0) {
+            if (magnitude < 0.0) {
                 set cNegative += 1;
-                set coef = -coef;
+                set magnitude = -magnitude;
                 set ang = PI();
             }
-            set coefficientsComplexPolar w/= idx <- ComplexPolar(coef, ang);
+            set complexCoefficients w/= idx <- ComplexPolar(magnitude, ang);
         }
         // Check if we can apply the explicit two-qubit case.
@@ -92,11 +93,11 @@
         // Here, by a "few," we mean fewer than the number of qubits required
         // to encode features.
         if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) {
-            return _EncodeSparseNegativeInput(cNegative, tolerance, coefficientsComplexPolar, _); //TODO:MORE:ACCEPTANCE ("Wines" passing soi far)
+            return _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _); //TODO:MORE:ACCEPTANCE ("Wines" passing so far)
         }
         // Finally, we fall back to arbitrary state preparation.
-        return ApproximatelyPrepareArbitraryState(tolerance, coefficientsComplexPolar, _);
+        return ApproximatelyPrepareArbitraryState(tolerance, complexCoefficients, _);
     } //EncodeNoisyInput
     //TODO:REVIEW: Design consideration! The implicit qubit count must be read off from the state encoder, NOT from the gate sequence!
@@ -105,23 +106,18 @@
     /// The vector of 'coefficients' does not have to be normalized
     function InputEncoder(coefficients : Double[]): (LittleEndian => Unit is Adj + Ctl) {
         //default implementation, does not respect sparsity
-        let nCoefficients = Length(coefficients);
-        mutable coefficientsComplexPolar = new ComplexPolar[nCoefficients];
-        mutable allPositive = true;
-        for (idx in 0 .. nCoefficients - 1) {
-            mutable coef = coefficients[idx];
-            mutable ang = 0.0;
-            if (coef < 0.0) {
-                set allPositive = false;
-                set coef = -coef;
-                set ang = PI();
-            }
-            set coefficientsComplexPolar w/= idx<-ComplexPolar(coef,ang);
+        mutable complexCoefficients = new ComplexPolar[Length(coefficients)];
+        for ((idx, coefficient) in Enumerated(coefficients)) {
+            set complexCoefficients w/= idx <- ComplexPolar(
+                coefficient >= 0.0
+                    ? 
(coefficient, 0.0) + | (-coefficient, PI()) + ); } if (_CanApplyTwoQubitCase(coefficients)) { return _ApplyTwoQubitCase(coefficients, _); } - return ApproximatelyPrepareArbitraryState(1E-12, coefficientsComplexPolar, _); //this is preparing the state almost exactly so far + return ApproximatelyPrepareArbitraryState(1E-12, complexCoefficients, _); //this is preparing the state almost exactly so far } -} \ No newline at end of file +} diff --git a/MachineLearning/src/DataModel/DataModel.csproj b/MachineLearning/src/MachineLearning.csproj similarity index 58% rename from MachineLearning/src/DataModel/DataModel.csproj rename to MachineLearning/src/MachineLearning.csproj index 3aadd057759..6239e0d5313 100644 --- a/MachineLearning/src/DataModel/DataModel.csproj +++ b/MachineLearning/src/MachineLearning.csproj @@ -1,26 +1,36 @@ - - + netstandard2.1 x64 - Microsoft.Quantum.MachineLearning.DataModel - bin\$(BuildConfiguration)\$(PlatformTarget)\$(AssemblyName).xml + Microsoft.Quantum.MachineLearning true + + false + + + + + + + + + - True Microsoft Microsoft's Quantum ML Libraries. © Microsoft Corporation. All rights reserved. Microsoft.Quantum.MachineLearning - See: https://docs.microsoft.com/en-us/quantum/relnotes/ + See: https://docs.microsoft.com/quantum/relnotes/ https://github.com/Microsoft/QuantumLibraries/raw/master/LICENSE.txt - https://github.com/Microsoft/QuantumLibraries/tree/master/Chemistry + https://github.com/Microsoft/QuantumLibraries/tree/master/MachineLearning https://secure.gravatar.com/avatar/bd1f02955b2853ba0a3b1cdc2434e8ec.png Quantum Q# Qsharp 1591 - true true true true @@ -28,16 +38,4 @@ $(AllowedOutputExtensionsInPackageBuildOutputFolder);.pdb;.xml - - - - - - - - - - - - diff --git a/MachineLearning/src/Runtime/Properties/NamespaceInfo.qs b/MachineLearning/src/Properties/NamespaceInfo.qs similarity index 100% rename from MachineLearning/src/Runtime/Properties/NamespaceInfo.qs rename to MachineLearning/src/Properties/NamespaceInfo.qs diff --git a/MachineLearning/src/Runtime/RotationSequences.qs b/MachineLearning/src/RotationSequences.qs similarity index 68% rename from MachineLearning/src/Runtime/RotationSequences.qs rename to MachineLearning/src/RotationSequences.qs index 1687d8c99dc..31fa95373fd 100644 --- a/MachineLearning/src/Runtime/RotationSequences.qs +++ b/MachineLearning/src/RotationSequences.qs @@ -7,12 +7,12 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - /// What is the minimum number of qubits - /// to support the subject gate sequence? - /// Find the maximum qubit index m occuring - /// in a gate sequence and return m+1 - function NQubitsRequired(seq : GateSequence) : Int { - mutable nQubitsRequired = 0; + /// What is the minimum number of qubits + /// to support the subject gate sequence? + /// Find the maximum qubit index m occuring + /// in a gate sequence and return m+1 + function NQubitsRequired(seq : GateSequence) : Int { + mutable nQubitsRequired = 0; for (gate in seq!) 
{ set nQubitsRequired = Fold( MaxI, 0, @@ -23,11 +23,11 @@ namespace Microsoft.Quantum.MachineLearning { ); } return nQubitsRequired; - } + } - /// Apply parameterized gate sequence to subject qubit register - /// - operation _ApplyGates(parameters : Double[], gates: GateSequence, qubits : Qubit[]) : (Unit) is Adj + Ctl { + /// Apply parameterized gate sequence to subject qubit register + /// + operation _ApplyGates(parameters : Double[], gates: GateSequence, qubits : Qubit[]) : (Unit) is Adj + Ctl { //dumpRegisterToConsole(qubits); for (gate in gates!) { // let (gsp,p,ix) = gt!; @@ -44,8 +44,8 @@ namespace Microsoft.Quantum.MachineLearning { } } - operation ApplyGates(parameters : Double[], gates: GateSequence): (Qubit[] => Unit is Adj + Ctl) { + operation ApplyGates(parameters : Double[], gates: GateSequence): (Qubit[] => Unit is Adj + Ctl) { return _ApplyGates(parameters,gates,_); - } + } } diff --git a/MachineLearning/src/Runtime/Convert.qs b/MachineLearning/src/Runtime/Convert.qs deleted file mode 100644 index bcebf05d77e..00000000000 --- a/MachineLearning/src/Runtime/Convert.qs +++ /dev/null @@ -1,63 +0,0 @@ -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Math; - - function unFlattenSchedule(sc : Int[][]) : SamplingSchedule - { - mutable ret = new Range[0]; - for (flattenedRange in sc) { - set ret += [flattenedRange[0]..flattenedRange[1]..flattenedRange[2]]; - } - return SamplingSchedule(ret); - } - - function unFlattenLabeledSamples(dat:Double[][], labs:Int[]) : LabeledSample[] { - mutable cnt = MinI(Length(dat), Length(labs)); - mutable ret = new LabeledSample[cnt]; - for (j in 0..(cnt - 1)) { - set ret w/= j <- LabeledSample(dat[j], labs[j]); - } - return ret; - } - - /// Debugging prop - operation unFlattenPauli(p:Int): Pauli - { - if (p==1) - { - return PauliX; - } - if (p==2) - { - return PauliY; - } - if (p==3) - { - return PauliZ; - } - return PauliI; - } - - /// Debugging prop - /// upcasting controlled rotation in flat representation (paramIx,pauliIx,gateSpan) - operation unFlattenControlledRotation(cod:Int[]): ControlledRotation { - return ControlledRotation( - GateSpan( - cod[2], cod[3...] - ), - unFlattenPauli(cod[1]), - cod[0] - ); - } - - /// Debugging prop - operation unFlattenGateSequence(seq: Int[][]) : GateSequence { - mutable tmp = new ControlledRotation[Length(seq)]; - for (icr in 0..(Length(seq) - 1)) { - set tmp w/= icr <- unFlattenControlledRotation(seq[icr]); - } - return GateSequence(tmp); - } - -} \ No newline at end of file diff --git a/MachineLearning/src/Runtime/Runtime.csproj b/MachineLearning/src/Runtime/Runtime.csproj deleted file mode 100644 index 60fa28844d9..00000000000 --- a/MachineLearning/src/Runtime/Runtime.csproj +++ /dev/null @@ -1,18 +0,0 @@ - - - netstandard2.1 - x64 - Microsoft.Quantum.MachineLearning.Runtime - true - - false - - - - - - - diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs new file mode 100644 index 00000000000..c50e4acab6f --- /dev/null +++ b/MachineLearning/src/Structure.qs @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +namespace Microsoft.Quantum.MachineLearning { + + open Microsoft.Quantum.Arrays; + + function _UncontrolledSpanSequence(idxsQubits : Int[]) : GateSpan[] { + return Mapped( + GateSpan(_, new Int[0]), + idxsQubits + ); + } + + function _CallFlipped<'TInput1, 'TInput2, 'TOutput>( + fn : (('TInput1, 'TInput2) -> 'TOutput), + y : 'TInput2, x : 'TInput1 + ) : 'TOutput { + return fn(x, y); + } + + function _Flipped<'TInput1, 'TInput2, 'TOutput>( + fn : (('TInput1, 'TInput2) -> 'TOutput) + ) : (('TInput2, 'TInput1) -> 'TOutput) { + return _CallFlipped(fn, _, _); + } + + function LocalRotationsLayer(nQubits : Int, axis : Pauli) : GateSequence { + // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] + return GateSequence(Mapped( + _Flipped(ControlledRotation(_, axis, _)), + Enumerated( + _UncontrolledSpanSequence(SequenceI(0, nQubits - 1)) + ) + )); + } + + + function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : GateSequence { + // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] + return GateSequence(Mapped( + _Flipped(ControlledRotation(_, axis, _)), + Enumerated( + _UncontrolledSpanSequence(idxsQubits) + ) + )); + } + + function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : GateSequence { + mutable rotations = new ControlledRotation[0]; + for (idxTarget in 0..nQubits - 1) { + set rotations += [ControlledRotation( + GateSpan( + idxTarget, + [(idxTarget + stride) % nQubits] + ), + axis, idxTarget + )]; + } + return GateSequence(rotations); + } + + function CombinedGateSequence(layers : GateSequence[]) : GateSequence { + mutable combined = (Head(layers))!; + mutable offset = Length(combined); + for (layer in Rest(layers)) { + for (gate in layer!) { + set combined += [gate w/ Index <- gate::Index + offset]; + } + set offset += Length(layer!); + } + return GateSequence(combined); + } + +} \ No newline at end of file diff --git a/MachineLearning/src/Runtime/Training.qs b/MachineLearning/src/Training.qs similarity index 99% rename from MachineLearning/src/Runtime/Training.qs rename to MachineLearning/src/Training.qs index 9acea0d7dc6..24d43a2c138 100644 --- a/MachineLearning/src/Runtime/Training.qs +++ b/MachineLearning/src/Training.qs @@ -129,7 +129,7 @@ namespace Microsoft.Quantum.MachineLearning { } let stateGenerator = StateGenerator( nQubits, - NoisyInputEncoder(effectiveTolerance, sample::Features) + ApproximateInputEncoder(effectiveTolerance, sample::Features) ); let grad = EstimateGradient( gates, param, stateGenerator, diff --git a/MachineLearning/src/Runtime/Types.qs b/MachineLearning/src/Types.qs similarity index 100% rename from MachineLearning/src/Runtime/Types.qs rename to MachineLearning/src/Types.qs diff --git a/MachineLearning/src/Runtime/Utils.qs b/MachineLearning/src/Utils.qs similarity index 87% rename from MachineLearning/src/Runtime/Utils.qs rename to MachineLearning/src/Utils.qs index c412f05461d..0b710c8bf29 100644 --- a/MachineLearning/src/Runtime/Utils.qs +++ b/MachineLearning/src/Utils.qs @@ -5,9 +5,9 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Math; - function _AllNearlyEqualD(v1: Double[], v2: Double[]):Bool { + function _AllNearlyEqualD(v1: Double[], v2: Double[]):Bool { return Length(v1) == Length(v2) and All(NearlyEqualD, Zip(v1, v2)); - } + } operation _TailMeasurement(nQubits : Int) : (Qubit[] => Result) { let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; diff --git a/MachineLearning/src/Runtime/Validation.qs 
b/MachineLearning/src/Validation.qs similarity index 100% rename from MachineLearning/src/Runtime/Validation.qs rename to MachineLearning/src/Validation.qs diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj index d2f2b5afd76..794fb415906 100644 --- a/MachineLearning/tests/MachineLearningTests.csproj +++ b/MachineLearning/tests/MachineLearningTests.csproj @@ -14,7 +14,6 @@ - - + From a35aa5fe50a77afde01660476d7bcd54245a53b2 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Thu, 16 Jan 2020 13:40:52 -0800 Subject: [PATCH 13/43] Final prep for QML review (#205) * Begin preparing for API review. * A bit more refactoring. * A bit more refactoring, more /// comments. * Progress on ///, code quality. --- MachineLearning/src/Classification.qs | 57 +++++- MachineLearning/src/Convert.qs | 66 ------- MachineLearning/src/Features.qs | 9 - MachineLearning/src/GradientEstimation.qs | 65 +++---- MachineLearning/src/InputEncoding.qs | 38 +++- MachineLearning/src/{Utils.qs => Private.qs} | 2 +- MachineLearning/src/RotationSequences.qs | 51 ------ MachineLearning/src/Structure.qs | 77 +++++++- MachineLearning/src/Training.qs | 176 ++++++++++--------- MachineLearning/src/Types.qs | 16 +- MachineLearning/src/Validation.qs | 80 ++++----- Standard/src/Preparation/Arbitrary.qs | 89 +++++++++- 12 files changed, 408 insertions(+), 318 deletions(-) delete mode 100644 MachineLearning/src/Convert.qs delete mode 100644 MachineLearning/src/Features.qs rename MachineLearning/src/{Utils.qs => Private.qs} (87%) delete mode 100644 MachineLearning/src/RotationSequences.qs diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index 771270819ca..4f998eea008 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -10,38 +10,79 @@ namespace Microsoft.Quantum.MachineLearning { operation _PrepareClassification( encoder : (LittleEndian => Unit is Adj + Ctl), + structure : SequentialClassifierStructure, parameters : Double[], - gates : GateSequence, target : Qubit[] ) : Unit is Adj { encoder(LittleEndian(target)); - _ApplyGates(parameters, gates, target); + ApplySequentialClassifier(structure, parameters, target); } + /// # Summary + /// Given a sample and a sequential classifier, estimates the + /// classification probability for that sample by repeatedly measuring + /// the output of the classifier on the given sample. + /// + /// # Input + /// ## tolerance + /// The tolerance to allow in encoding the sample into a state preparation + /// operation. + /// ## parameters + /// A parameterization of the given sequential classifier. + /// ## structure + /// The structure of the given sequential classifier. + /// ## sample + /// The feature vector for the sample to be classified. + /// ## nMeasurements + /// The number of measusrements to use in estimating the classification + /// probability. + /// # Output + /// An estimate of the classification probability for the given sample. 
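As a rough usage sketch, assuming only the signatures visible in this hunk: a single sample can be scored and labelled as below. The tolerance and measurement count are placeholder values, and the decision rule at the end is illustrative only; the library's own labelling logic lives in `InferredLabels`.

```Q#
// Hedged sketch (not part of the patch): classify one feature vector with an
// already-trained classifier. The structure, parameters and bias are assumed
// to come from a prior call to TrainSequentialClassifier.
operation ClassifySingleSample(
    structure : SequentialClassifierStructure,
    parameters : Double[],
    bias : Double,
    sample : Double[]
) : Int {
    let probability = EstimateClassificationProbability(
        1E-3,       // encoding tolerance (placeholder value)
        parameters, structure, sample,
        10000       // number of measurements (placeholder value)
    );
    // Illustrative decision rule only: label 1 when the biased estimate
    // crosses one half.
    return (probability + bias > 0.5) ? 1 | 0;
}
```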
operation EstimateClassificationProbability( - tolerance: Double, + tolerance : Double, parameters : Double[], - gates: GateSequence, - sample: Double[], + structure : SequentialClassifierStructure, + sample : Double[], nMeasurements: Int ) : Double { let nQubits = FeatureRegisterSize(sample); - let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(structure!)), sample); let encodedSample = StateGenerator(nQubits, circEnc); return 1.0 - EstimateFrequencyA( - _PrepareClassification(encodedSample::Apply, parameters, gates, _), + _PrepareClassification(encodedSample::Apply, structure, parameters, _), _TailMeasurement(encodedSample::NQubits), encodedSample::NQubits, nMeasurements ); } + /// # Summary + /// Given a set of samples and a sequential classifier, estimates the + /// classification probability for those samples by repeatedly measuring + /// the output of the classifier on each sample. + /// + /// # Input + /// ## tolerance + /// The tolerance to allow in encoding the sample into a state preparation + /// operation. + /// ## parameters + /// A parameterization of the given sequential classifier. + /// ## structure + /// The structure of the given sequential classifier. + /// ## samples + /// An array of feature vectors for each sample to be classified. + /// ## nMeasurements + /// The number of measusrements to use in estimating the classification + /// probability. + /// # Output + /// An array of estimates of the classification probability for each given + /// sample. operation EstimateClassificationProbabilities( tolerance : Double, parameters : Double[], - structure : GateSequence, + structure : SequentialClassifierStructure, samples : Double[][], nMeasurements : Int ) diff --git a/MachineLearning/src/Convert.qs b/MachineLearning/src/Convert.qs deleted file mode 100644 index 9149c146e3b..00000000000 --- a/MachineLearning/src/Convert.qs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Math; - - function unFlattenSchedule(sc : Int[][]) : SamplingSchedule - { - mutable ret = new Range[0]; - for (flattenedRange in sc) { - set ret += [flattenedRange[0]..flattenedRange[1]..flattenedRange[2]]; - } - return SamplingSchedule(ret); - } - - function unFlattenLabeledSamples(dat:Double[][], labs:Int[]) : LabeledSample[] { - mutable cnt = MinI(Length(dat), Length(labs)); - mutable ret = new LabeledSample[cnt]; - for (j in 0..(cnt - 1)) { - set ret w/= j <- LabeledSample(dat[j], labs[j]); - } - return ret; - } - - /// Debugging prop - operation unFlattenPauli(p:Int): Pauli - { - if (p==1) - { - return PauliX; - } - if (p==2) - { - return PauliY; - } - if (p==3) - { - return PauliZ; - } - return PauliI; - } - - /// Debugging prop - /// upcasting controlled rotation in flat representation (paramIx,pauliIx,gateSpan) - operation unFlattenControlledRotation(cod:Int[]): ControlledRotation { - return ControlledRotation( - GateSpan( - cod[2], cod[3...] 
- ), - unFlattenPauli(cod[1]), - cod[0] - ); - } - - /// Debugging prop - operation unFlattenGateSequence(seq: Int[][]) : GateSequence { - mutable tmp = new ControlledRotation[Length(seq)]; - for (icr in 0..(Length(seq) - 1)) { - set tmp w/= icr <- unFlattenControlledRotation(seq[icr]); - } - return GateSequence(tmp); - } - -} \ No newline at end of file diff --git a/MachineLearning/src/Features.qs b/MachineLearning/src/Features.qs deleted file mode 100644 index 94b882868d0..00000000000 --- a/MachineLearning/src/Features.qs +++ /dev/null @@ -1,9 +0,0 @@ -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Convert; - - function FeatureRegisterSize(sample : Double[]) : Int { - return Ceiling(Lg(IntAsDouble(Length(sample)))); - } - -} diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index 26fd7d039e4..9843328dc25 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -19,70 +19,73 @@ namespace Microsoft.Quantum.MachineLearning { operation _EstimateDerivativeWithParameterShift( inputEncoder : StateGenerator, - gates : GateSequence, + structure : SequentialClassifierStructure, parameters : (Double[], Double[]), nQubits : Int, nMeasurements : Int ) : Double { return EstimateRealOverlapBetweenStates( _ApplyLEOperationToRawRegister(inputEncoder::Apply, _), - _ApplyGates(Fst(parameters), gates, _), - _ApplyGates(Snd(parameters), gates, _), + ApplySequentialClassifier(structure, Fst(parameters), _), + ApplySequentialClassifier(structure, Snd(parameters), _), nQubits, nMeasurements ); } /// # Summary - /// polymorphic classical/quantum gradient estimator + /// Estimates the training gradient for a sequential classifier at a + /// particular set of parameters and for a given encoded input. /// /// # Input + /// ## structure + /// The structure of the sequential classifier as a sequence of quantum + /// operations. /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// + /// A set of parameters for the given classifier structure. /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. - /// IMPORTANT: measCount==0 implies simulator deployment + /// An input to the sequential classifier, encoded into a state preparation + /// operation. + /// ## nMeasurements + /// The number of measurements to use in estimating the gradient. /// /// # Output - /// the gradient + /// An estimate of the training gradient at the given input and model + /// parameters. /// + /// # Remarks + /// This operation uses a Hadamard test and the parameter shift technique + /// together to estimate the gradient. 
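A minimal sketch of how this operation is fed, mirroring the call sites in Training.qs where the raw sample is first wrapped in a StateGenerator via ApproximateInputEncoder; the tolerance below is a placeholder value and the wrapper operation is illustrative, not part of the patch.

```Q#
// Hedged sketch: estimate the training gradient for one raw feature vector.
operation EstimateGradientForSample(
    structure : SequentialClassifierStructure,
    parameters : Double[],
    sample : Double[],
    nMeasurements : Int
) : Double[] {
    // Wrap the amplitude encoding of the sample as a state generator, in the
    // same way as the training loop in Training.qs does.
    let stateGenerator = StateGenerator(
        FeatureRegisterSize(sample),
        ApproximateInputEncoder(1E-3, sample)
    );
    return EstimateGradient(structure, parameters, stateGenerator, nMeasurements);
}
```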
operation EstimateGradient( - gates : GateSequence, + structure : SequentialClassifierStructure, param : Double[], sg : StateGenerator, nMeasurements : Int ) : (Double[]) { - //Synopsis: Suppose (param,gates) define Circ0 - //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 - //The expectation derivative is then 2 Re[] = - // Re[] - Re[] - //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 - //Thus we are left to compute Re[] = - // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> - //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) + // Synopsis: Suppose (param,gates) define Circ0 + // Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 + // The expectation derivative is then 2 Re[] = + // Re[] - Re[] + // We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 + // Thus we are left to compute Re[] = + // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> + // i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) - //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) - //and we want a unitary description of its \theta-derivative. It can be written as + // Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) + // and we want a unitary description of its \theta-derivative. It can be written as // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} mutable grad = ConstantArray(Length(param), 0.0); - let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); + let nQubits = MaxI(NQubitsRequired(structure), sg::NQubits); - for (gate in gates!) { + for (gate in structure!) { let paramShift = (param + [0.0]) // Shift the corresponding parameter. w/ gate::Index <- (param[gate::Index] + PI()); // NB: This the *antiderivative* of the bracket let newDer = _EstimateDerivativeWithParameterShift( - sg, gates, (param, paramShift), nQubits, nMeasurements + sg, structure, (param, paramShift), nQubits, nMeasurements ); if (IsEmpty(gate::Span::ControlIndices)) { //uncontrolled gate @@ -91,10 +94,10 @@ namespace Microsoft.Quantum.MachineLearning { //controlled gate let controlledShift = paramShift w/ gate::Index <- (param[gate::Index] + 3.0 * PI()); - //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) + // Assumption: any rotation R has the property that R(\theta + 2 Pi) = (-1) R(\theta). // NB: This the *antiderivative* of the bracket let newDer1 = _EstimateDerivativeWithParameterShift( - sg, gates, (param, controlledShift), nQubits, nMeasurements + sg, structure, (param, controlledShift), nQubits, nMeasurements ); set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1)); } diff --git a/MachineLearning/src/InputEncoding.qs b/MachineLearning/src/InputEncoding.qs index 1eb4b1db7d3..d997987ba19 100644 --- a/MachineLearning/src/InputEncoding.qs +++ b/MachineLearning/src/InputEncoding.qs @@ -60,11 +60,38 @@ namespace Microsoft.Quantum.MachineLearning { // Reflect about the negative coefficients to apply the negative signs // at the end. 
for (idxNegative in negLocs) { - ReflectAboutInteger(idxNegative, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) + ReflectAboutInteger(idxNegative, reg); } } - function ApproximateInputEncoder(tolerance : Double,coefficients : Double[]) + /// # Summary + /// Returns the number of qubits required to encode a particular feature + /// vector. + /// + /// # Input + /// ## sample + /// A sample feature vector to be encoded into a qubit register. + /// + /// # Output + /// The size required to encode `sample` into a qubit register, expressed + /// as a number of qubits. + function FeatureRegisterSize(sample : Double[]) : Int { + return Ceiling(Lg(IntAsDouble(Length(sample)))); + } + + /// # Summary + /// Given a set of coefficients and a tolerance, returns a state preparation + /// operation that prepares each coefficient as the corresponding amplitude + /// of a computational basis state, up to the given tolerance. + /// + /// # Input + /// ## tolerance + /// // TODO + /// ## coefficients + /// // TODO + /// # Output + /// // TODO + function ApproximateInputEncoder(tolerance : Double, coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 let nCoefficients = Length(coefficients); @@ -93,18 +120,17 @@ namespace Microsoft.Quantum.MachineLearning { // Here, by a "few," we mean fewer than the number of qubits required // to encode features. if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) { - return _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _); //TODO:MORE:ACCEPTANCE ("Wines" passing soi far) + return _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _); } // Finally, we fall back to arbitrary state preparation. return ApproximatelyPrepareArbitraryState(tolerance, complexCoefficients, _); } //EncodeNoisyInput - //TODO:REVIEW: Design consideration! The implicit qubit count must be read off from the state encoder, NOT from the gate sequence! - /// Create amplitude encoding of an array of real-valued coefficients /// The vector of 'coefficients' does not have to be unitary - function InputEncoder(coefficients : Double[]): (LittleEndian => Unit is Adj + Ctl) { + function InputEncoder(coefficients : Double[]) + : (LittleEndian => Unit is Adj + Ctl) { //default implementation, does not respect sparcity mutable complexCoefficients = new ComplexPolar[Length(coefficients)]; for ((idx, coefficient) in Enumerated(coefficients)) { diff --git a/MachineLearning/src/Utils.qs b/MachineLearning/src/Private.qs similarity index 87% rename from MachineLearning/src/Utils.qs rename to MachineLearning/src/Private.qs index 0b710c8bf29..83540965013 100644 --- a/MachineLearning/src/Utils.qs +++ b/MachineLearning/src/Private.qs @@ -5,7 +5,7 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Math; - function _AllNearlyEqualD(v1: Double[], v2: Double[]):Bool { + function _AllNearlyEqualD(v1 : Double[], v2 : Double[]) : Bool { return Length(v1) == Length(v2) and All(NearlyEqualD, Zip(v1, v2)); } diff --git a/MachineLearning/src/RotationSequences.qs b/MachineLearning/src/RotationSequences.qs deleted file mode 100644 index 31fa95373fd..00000000000 --- a/MachineLearning/src/RotationSequences.qs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. - -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - - /// What is the minimum number of qubits - /// to support the subject gate sequence? - /// Find the maximum qubit index m occuring - /// in a gate sequence and return m+1 - function NQubitsRequired(seq : GateSequence) : Int { - mutable nQubitsRequired = 0; - for (gate in seq!) { - set nQubitsRequired = Fold( - MaxI, 0, - gate::Span::ControlIndices + [ - gate::Span::TargetIndex, - nQubitsRequired - ] - ); - } - return nQubitsRequired; - } - - /// Apply parameterized gate sequence to subject qubit register - /// - operation _ApplyGates(parameters : Double[], gates: GateSequence, qubits : Qubit[]) : (Unit) is Adj + Ctl { - //dumpRegisterToConsole(qubits); - for (gate in gates!) { - // let (gsp,p,ix) = gt!; - if (gate::Index < Length(parameters)) { - let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); - if (IsEmpty(gate::Span::ControlIndices)) { - // Uncontrolled rotation of target - R(input); - } else { - //TODO: should one validate the control indices first? - (Controlled R)(Subarray(gate::Span::ControlIndices, qubits), input); - } - } - } - } - - operation ApplyGates(parameters : Double[], gates: GateSequence): (Qubit[] => Unit is Adj + Ctl) { - return _ApplyGates(parameters,gates,_); - } - -} diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index c50e4acab6f..41823f43782 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -2,8 +2,67 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { - + open Microsoft.Quantum.Math; open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + /// # Summary + /// Returns the number of qubits required to apply a given sequential + /// classifier. + /// + /// # Input + /// ## structure + /// The structure of a given sequential classifier. + /// + /// # Output + /// The minimum size of a register on which the sequential classifier + /// may be applied. + function NQubitsRequired(structure : SequentialClassifierStructure) + : Int { + mutable nQubitsRequired = 0; + for (gate in structure!) { + set nQubitsRequired = Fold( + MaxI, 0, + gate::Span::ControlIndices + [ + gate::Span::TargetIndex, + nQubitsRequired + ] + ); + } + return nQubitsRequired; + } + + /// # Summary + /// Given the structure and parameterization of a sequential classifier, + /// applies the classifier to a register of qubits. + /// + /// # Input + /// ## structure + /// Structure of the given sequential classifier. + /// ## parameters + /// A parameterization at which the given structure is applied. + /// ## qubits + /// A target register to which the classifier should be applied. + operation ApplySequentialClassifier( + structure : SequentialClassifierStructure, + parameters : Double[], + qubits : Qubit[] + ) + : (Unit) is Adj + Ctl { + for (gate in structure!) { + if (gate::Index < Length(parameters)) { + let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); + if (IsEmpty(gate::Span::ControlIndices)) { + // Uncontrolled rotation of target + R(input); + } else { + //TODO: should one validate the control indices first? 
+ (Controlled R)(Subarray(gate::Span::ControlIndices, qubits), input); + } + } + } + } function _UncontrolledSpanSequence(idxsQubits : Int[]) : GateSpan[] { return Mapped( @@ -25,9 +84,9 @@ namespace Microsoft.Quantum.MachineLearning { return _CallFlipped(fn, _, _); } - function LocalRotationsLayer(nQubits : Int, axis : Pauli) : GateSequence { + function LocalRotationsLayer(nQubits : Int, axis : Pauli) : SequentialClassifierStructure { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - return GateSequence(Mapped( + return SequentialClassifierStructure(Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( _UncontrolledSpanSequence(SequenceI(0, nQubits - 1)) @@ -36,9 +95,9 @@ namespace Microsoft.Quantum.MachineLearning { } - function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : GateSequence { + function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : SequentialClassifierStructure { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - return GateSequence(Mapped( + return SequentialClassifierStructure(Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( _UncontrolledSpanSequence(idxsQubits) @@ -46,7 +105,7 @@ namespace Microsoft.Quantum.MachineLearning { )); } - function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : GateSequence { + function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : SequentialClassifierStructure { mutable rotations = new ControlledRotation[0]; for (idxTarget in 0..nQubits - 1) { set rotations += [ControlledRotation( @@ -57,10 +116,10 @@ namespace Microsoft.Quantum.MachineLearning { axis, idxTarget )]; } - return GateSequence(rotations); + return SequentialClassifierStructure(rotations); } - function CombinedGateSequence(layers : GateSequence[]) : GateSequence { + function CombinedStructure(layers : SequentialClassifierStructure[]) : SequentialClassifierStructure { mutable combined = (Head(layers))!; mutable offset = Length(combined); for (layer in Rest(layers)) { @@ -69,7 +128,7 @@ namespace Microsoft.Quantum.MachineLearning { } set offset += Length(layer!); } - return GateSequence(combined); + return SequentialClassifierStructure(combined); } } \ No newline at end of file diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index 24d43a2c138..ac0fa94eb52 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -46,13 +46,45 @@ namespace Microsoft.Quantum.MachineLearning { return optimum::Coordinate; } + /// # Summary + /// Given the structure of a sequential classifier, trains the classifier + /// on a given labeled training set. + /// + /// # Input + /// ## structure + /// Structure of the sequential classifier to be trained. + /// ## parameterSource + /// A list of parameter vectors to use as starting points during training. + /// ## samples + /// A set of labeled training data that will be used to perform training. + /// ## options + /// Configuration to be used when training; see + /// @"microsoft.quantum.machinelearning.trainingoptions" and + /// @"microsoft.quantum.machinelearning.defaulttrainingoptions" for more + /// details. + /// ## trainingSchedule + /// A sampling schedule to use when selecting samples from the training + /// data during training steps. + /// ## validationSchedule + /// A sampling schedule to use when selecting samples from the training + /// data when selecting which start point resulted in the best classifier + /// score. 
+ /// + /// # Output + /// A parameterization of the given classifier and a bias between the two + /// classes, together corresponding to the best result from each of the + /// given start points. + /// + /// # See Also + /// - Microsoft.Quantum.MachineLearning.TrainSequentialClassifierAtModel + /// - Microsoft.Quantum.MachineLearning.ValidateSequentialClassifier operation TrainSequentialClassifier( - gates: GateSequence, - parameterSource: Double[][], - samples: LabeledSample[], + structure : SequentialClassifierStructure, + parameterSource : Double[][], + samples : LabeledSample[], options : TrainingOptions, - trainingSchedule: SamplingSchedule, - validationSchedule: SamplingSchedule + trainingSchedule : SamplingSchedule, + validationSchedule : SamplingSchedule ) : SequentialModel { mutable bestSoFar = SequentialModel([-1E12], -2.0); mutable bestValidation = Length(samples) + 1; @@ -60,16 +92,16 @@ namespace Microsoft.Quantum.MachineLearning { let features = Mapped(_Features, samples); let labels = Mapped(_Label, samples); - for (idxStart in 0..(Length(parameterSource) - 1)) { + for ((idxStart, parameters) in Enumerated(parameterSource)) { Message($"Beginning training at start point #{idxStart}..."); let proposedUpdate = TrainSequentialClassifierAtModel( - gates, SequentialModel(parameterSource[idxStart], 0.0), + structure, SequentialModel(parameters, 0.0), samples, options, trainingSchedule, 1 ); let probabilities = EstimateClassificationProbabilities( options::Tolerance, proposedUpdate::Parameters, - gates, + structure, Sampled(validationSchedule, features), options::NMeasurements ); @@ -115,7 +147,7 @@ namespace Microsoft.Quantum.MachineLearning { operation _RunSingleTrainingStep( miniBatch : LabeledSample[], options : TrainingOptions, - param : Double[], gates : GateSequence + param : Double[], gates : SequentialClassifierStructure ) : (Double, Double[]) { mutable batchGradient = ConstantArray(Length(param), 0.0); @@ -148,49 +180,42 @@ namespace Microsoft.Quantum.MachineLearning { } /// # Summary - /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator + /// Perform one epoch of sequential classifier training on a subset of + /// data samples. /// /// # Input /// ## samples - /// a container of available data samples - /// + /// The samples to be trained on. /// ## sched - /// a schedule of the data subset for this training loop - /// + /// A sampling schedule defining a subset of samples to be included in training. /// ## schedScore - /// defines a (possibly different) data subset on which accuracy scoring is performed - /// + /// A sampling schedule defining a subset of samples to be used in + /// accuracy scoring. /// ## periodScore - /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) - /// - /// ## miniBatchSize - /// number of samples in a gradient mini batch - /// - /// ## param - /// initial parameter vector - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// reserved for future use; originally - initial prediction bias - /// - /// ## lrate - /// learning rate - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. + /// The number of gradient steps to be taken between scoring points. + /// For best accuracy, set to 1. + /// ## options + /// Options to be used in training. + /// ## structure + /// The structure of the sequential classifier to be trained. 
+ /// ## model + /// The parameterization and bias of the sequential model to be trained. + /// ## nPreviousBestMisses + /// The best number of misclassifications observed in previous epochs. /// + /// # Output + /// - The smallest number of misclassifications observed through to this + /// epoch. + /// - The new best sequential model found. operation _RunSingleTrainingEpoch( - samples: LabeledSample[], - schedule: SamplingSchedule, periodScore: Int, + samples : LabeledSample[], + schedule : SamplingSchedule, periodScore: Int, options : TrainingOptions, - model : SequentialModel, gates: GateSequence, + structure : SequentialClassifierStructure, + model : SequentialModel, nPreviousBestMisses : Int ) : (Int, SequentialModel) { - let HARDCODEDunderage = 3; // 4/26 slack greater than 3 is not recommended - mutable nBestMisses = nPreviousBestMisses; mutable bestSoFar = model; let features = Mapped(_Features, samples); @@ -199,7 +224,7 @@ namespace Microsoft.Quantum.MachineLearning { let inferredLabels = InferredLabels( model::Bias, EstimateClassificationProbabilities( - options::Tolerance, model::Parameters, gates, + options::Tolerance, model::Parameters, structure, features, options::NMeasurements ) ); @@ -214,14 +239,14 @@ namespace Microsoft.Quantum.MachineLearning { ); for (minibatch in minibatches) { let (utility, updatedParameters) = _RunSingleTrainingStep( - minibatch, options, bestSoFar::Parameters, gates + minibatch, options, bestSoFar::Parameters, structure ); if (utility > 0.0000001) { // There has been some good parameter update. // Check if it actually improves things, and if so, // commit it. let probabilities = EstimateClassificationProbabilities( - options::Tolerance, updatedParameters, gates, + options::Tolerance, updatedParameters, structure, features, options::NMeasurements ); let updatedBias = _UpdatedBias( @@ -252,52 +277,38 @@ namespace Microsoft.Quantum.MachineLearning { ); } + + /// # Summary - /// Run a full circuit training loop on a subset of data samples + /// Given the structure of a sequential classifier, trains the classifier + /// on a given labeled training set, starting from a particular model. /// /// # Input + /// ## structure + /// Structure of the sequential classifier to be trained. + /// ## model + /// The sequential model to be used as a starting point for training. /// ## samples - /// a container of available data samples - /// - /// ## sched - /// a schedule of the data subset for this training loop - /// - /// ## schedScore - /// defines a (possibly different) data subset on which accuracy scoring is performed - /// - /// ## periodScore - /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) - /// - /// ## miniBatchSize - /// number of samples in a gradient mini batch - /// - /// ## param - /// initial parameter vector - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// reserved for future use; originally - initial prediction bias - /// - /// ## lrate - /// learning rate - /// - /// ## maxEpochs - /// maximum number of epochs in this loop - /// - /// ## tol - /// tolerance: acceptable misprediction rate in training - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. - /// IMPORTANT: measCount==0 implies simulator deployment + /// A set of labeled training data that will be used to perform training. 
+ /// ## options + /// Configuration to be used when training; see + /// @"microsoft.quantum.machinelearning.trainingoptions" and + /// @"microsoft.quantum.machinelearning.defaulttrainingoptions" for more + /// details. + /// ## schedule + /// A sampling schedule to use when selecting samples from the training + /// data during training steps. /// /// # Output - /// ((no.hits,no.misses),(opt.bias,opt.parameters)) + /// A parameterization of the given classifier and a bias between the two + /// classes, together corresponding to the best result from each of the + /// given start points. /// + /// # See Also + /// - Microsoft.Quantum.MachineLearning.TrainSequentialClassifier + /// - Microsoft.Quantum.MachineLearning.ValidateSequentialClassifier operation TrainSequentialClassifierAtModel( - gates : GateSequence, + gates : SequentialClassifierStructure, model : SequentialModel, samples : LabeledSample[], options : TrainingOptions, @@ -305,7 +316,6 @@ namespace Microsoft.Quantum.MachineLearning { periodScore : Int ) : SequentialModel { - //const let nSamples = Length(samples); let features = Mapped(_Features, samples); let actualLabels = Mapped(_Label, samples); @@ -339,7 +349,7 @@ namespace Microsoft.Quantum.MachineLearning { options w/ LearningRate <- lrate w/ MinibatchSize <- batchSize, - current, gates, + gates, current, nBestMisses ); if (nMisses < nBestMisses) { diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 3d8ca3ab9a5..3174cad8db5 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -20,7 +20,7 @@ namespace Microsoft.Quantum.MachineLearning { ); /// Abstraction for sequence of gates - newtype GateSequence = ControlledRotation[]; + newtype SequentialClassifierStructure = ControlledRotation[]; /// Abstraction for state preparation /// Fst(StateGenerator) is the number of qubits @@ -99,6 +99,20 @@ namespace Microsoft.Quantum.MachineLearning { StochasticRescaleFactor: Double ); + /// # Summary + /// Returns a default set of options for training classifiers. + /// + /// # Output + /// A reasonable set of default training options for use when training + /// classifiers. + /// + /// # Example + /// To use the default options, but with additional measurements, use the + /// `w/` operator: + /// ```Q# + /// let options = DefaultTrainingOptions() + /// w/ NMeasurements <- 1000000; + /// ``` function DefaultTrainingOptions() : TrainingOptions { return TrainingOptions( 0.1, 0.005, 15, 10000, 16, 8, 0.01 diff --git a/MachineLearning/src/Validation.qs b/MachineLearning/src/Validation.qs index f146eb4427a..99eefbc8273 100644 --- a/MachineLearning/src/Validation.qs +++ b/MachineLearning/src/Validation.qs @@ -4,6 +4,25 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Logical; open Microsoft.Quantum.Canon; + /// # Summary + /// Given a set of inferred labels and a set of correct labels, returns + /// indices for where each set of labels differs. + /// + /// # Input + /// ## inferredLabels + /// The labels inferred for a given training or validation set. + /// ## actualLabels + /// The true labels for a given training or validation set. + /// + /// # Output + /// An array of indices `idx` such that + /// `inferredLabels[idx] != actualLabels[idx]`. + /// + /// # Example + /// ```Q# + /// let misclassifications = Misclassifications([0, 1, 0, 0], [0, 1, 1, 0]); + /// Message($"{misclassifications}"); // Will print [2]. 
+ /// ``` function Misclassifications(inferredLabels : Int[], actualLabels : Int[]) : Int[] { return Where( @@ -12,58 +31,31 @@ namespace Microsoft.Quantum.MachineLearning { ); } - function NMisclassifications(proposed: Int[], actual: Int[]): Int { - return Length(Misclassifications(proposed, actual)); - } - /// # Summary - /// Using a flat description of a trained classification model, count - /// the number of mispredictions occuring over the validation set + /// Given a set of inferred labels and a set of correct labels, returns + /// the number of indices at which each set of labels differ. /// /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## trainingSet - /// the set of training samples - /// - /// ## trainingLabels - /// the set of training labels - /// - /// ## validatioSchedule - /// defines a subset of training data used for validation and computation of the *bias* - /// - /// ## gates - /// Flat representation of classifier structure. Each element is - /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] - /// - /// ## parameters - /// an array of candidate parameters - /// - /// ## bias - /// candidate predition bias - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability + /// ## inferredLabels + /// The labels inferred for a given training or validation set. + /// ## actualLabels + /// The true labels for a given training or validation set. /// /// # Output - /// the number of misclassifications + /// The number of indices `idx` such that + /// `inferredLabels[idx] != actualLabels[idx]`. /// - operation CountValidationMisses(tolerance: Double, nQubits: Int, trainingSet: Double[][], trainingLabels: Int[], validationSchedule: Int[][], gates: Int[][], parameters: Double[],bias:Double, nMeasurements: Int) : Int - { - let schValidate = unFlattenSchedule(validationSchedule); - let results = ValidateModel( - unFlattenGateSequence(gates), - SequentialModel(parameters, bias), - Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), - tolerance, nMeasurements, - schValidate - ); - return results::NMisclassifications; + /// # Example + /// ```Q# + /// let nMisclassifications = NMisclassifications([1, 1, 0, 0], [0, 1, 1, 0]); + /// Message($"{nMisclassifications}"); // Will print 2. + /// ``` + function NMisclassifications(proposed: Int[], actual: Int[]): Int { + return Length(Misclassifications(proposed, actual)); } - operation ValidateModel( - gates: GateSequence, + operation ValidateSequentialClassifier( + gates: SequentialClassifierStructure, model : SequentialModel, samples : LabeledSample[], tolerance: Double, diff --git a/Standard/src/Preparation/Arbitrary.qs b/Standard/src/Preparation/Arbitrary.qs index 8b12642d855..7115ca75c48 100644 --- a/Standard/src/Preparation/Arbitrary.qs +++ b/Standard/src/Preparation/Arbitrary.qs @@ -111,15 +111,25 @@ namespace Microsoft.Quantum.Preparation { /// # Summary - /// Returns an operation that prepares a given quantum state. + /// Given a set of coefficients and a little-endian encoded quantum register, + /// prepares an state on that register described by the given coefficients. /// - /// The returned operation $U$ prepares an arbitrary quantum + /// # Description + /// This operation prepares an arbitrary quantum /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from - /// the $n$-qubit computational basis state $\ket{0...0}$. + /// the $n$-qubit computational basis state $\ket{0 \cdots 0}$. 
+ /// In particular, the action of this operation can be simulated by the + /// a unitary transformation $U$ which acts on the all-zeros state as /// /// $$ /// \begin{align} - /// U\ket{0...0}=\ket{\psi}=\frac{\sum_{j=0}^{2^n-1}r_j e^{i t_j}\ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|r_j|^2}}. + /// U\ket{0...0} + /// & = \ket{\psi} \\\\ + /// & = \frac{ + /// \sum_{j=0}^{2^n-1} r_j e^{i t_j} \ket{j} + /// }{ + /// \sqrt{\sum_{j=0}^{2^n-1} |r_j|^2} + /// }. /// \end{align} /// $$ /// @@ -144,12 +154,70 @@ namespace Microsoft.Quantum.Preparation { /// - Synthesis of Quantum Logic Circuits /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov /// https://arxiv.org/abs/quant-ph/0406176 + /// + /// # See Also + /// - Microsoft.Quantum.Preparation.ApproximatelyPrepareArbitraryState operation PrepareArbitraryState(coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { ApproximatelyPrepareArbitraryState(0.0, coefficients, qubits); } - /// TODO - operation ApproximatelyPrepareArbitraryState(tolerance : Double, coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { + /// # Summary + /// Given a set of coefficients and a little-endian encoded quantum register, + /// prepares an state on that register described by the given coefficients, + /// up to a given approximation tolerance. + /// + /// # Description + /// This operation prepares an arbitrary quantum + /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from + /// the $n$-qubit computational basis state $\ket{0 \cdots 0}$. + /// In particular, the action of this operation can be simulated by the + /// a unitary transformation $U$ which acts on the all-zeros state as + /// + /// $$ + /// \begin{align} + /// U\ket{0...0} + /// & = \ket{\psi} \\\\ + /// & = \frac{ + /// \sum_{j=0}^{2^n-1} r_j e^{i t_j} \ket{j} + /// }{ + /// \sqrt{\sum_{j=0}^{2^n-1} |r_j|^2} + /// }. + /// \end{align} + /// $$ + /// + /// # Input + /// ## tolerance + /// The approximation tolerance to be used when preparing the given state. + /// + /// ## coefficients + /// Array of up to $2^n$ complex coefficients represented by their + /// absolute value and phase $(r_j, t_j)$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## qubits + /// Qubit register encoding number states in little-endian format. This is + /// expected to be initialized in the computational basis state + /// $\ket{0...0}$. + /// + /// # Remarks + /// Negative input coefficients $r_j < 0$ will be treated as though + /// positive with value $|r_j|$. `coefficients` will be padded with + /// elements $(r_j, t_j) = (0.0, 0.0)$ if fewer than $2^n$ are + /// specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + /// + /// # See Also + /// - Microsoft.Quantum.Preparation.ApproximatelyPrepareArbitraryState + operation ApproximatelyPrepareArbitraryState( + tolerance : Double, + coefficients : ComplexPolar[], + qubits : LittleEndian + ) + : Unit is Adj + Ctl { // pad coefficients at tail length to a power of 2. 
let coefficientsPadded = Padded(-2 ^ Length(qubits!), ComplexPolar(0.0, 0.0), coefficients); let target = (qubits!)[0]; @@ -168,7 +236,10 @@ namespace Microsoft.Quantum.Preparation { /// # See Also /// - PrepareArbitraryState /// - Microsoft.Quantum.Canon.MultiplexPauli - operation _ApproximatelyPrepareArbitraryState(tolerance: Double, coefficients : ComplexPolar[], control : LittleEndian, target : Qubit) + operation _ApproximatelyPrepareArbitraryState( + tolerance : Double, coefficients : ComplexPolar[], + control : LittleEndian, target : Qubit + ) : Unit is Adj + Ctl { // For each 2D block, compute disentangling single-qubit rotation parameters let (disentanglingY, disentanglingZ, newCoefficients) = _StatePreparationSBMComputeCoefficients(coefficients); @@ -188,9 +259,9 @@ namespace Microsoft.Quantum.Preparation { } } else { if (_AnyOutsideToleranceCP(tolerance, newCoefficients)) { - let newControl = LittleEndian((control!)[1 .. Length(control!) - 1]); + let newControl = LittleEndian(Rest(control!)); let newTarget = (control!)[0]; - _ApproximatelyPrepareArbitraryState(tolerance,newCoefficients, newControl, newTarget); + _ApproximatelyPrepareArbitraryState(tolerance, newCoefficients, newControl, newTarget); } } } From d8c437194a5d6ef980dee2a112e66e555d5b9a7a Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Thu, 23 Jan 2020 14:56:11 -0800 Subject: [PATCH 14/43] Fix path to pubkey for QML signing. --- MachineLearning/Common/DelaySign.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MachineLearning/Common/DelaySign.cs b/MachineLearning/Common/DelaySign.cs index 8a98ee2ffaa..b87eea67bcd 100644 --- a/MachineLearning/Common/DelaySign.cs +++ b/MachineLearning/Common/DelaySign.cs @@ -2,7 +2,7 @@ // Attributes for delay-signing #if SIGNED -[assembly:AssemblyKeyFile("..\\..\\Common\\267DevDivSNKey2048.snk")] +[assembly:AssemblyKeyFile("..\\Common\\267DevDivSNKey2048.snk")] [assembly:AssemblyDelaySign(true)] #endif From 27cae563426215ff4fe43702ee4b053a2c6f63ff Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Tue, 28 Jan 2020 11:08:51 -0800 Subject: [PATCH 15/43] Update QML signing for new pipeline. (#217) --- MachineLearning/Common/267DevDivSNKey2048.snk | Bin 288 -> 0 bytes MachineLearning/Common/DelaySign.cs | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 MachineLearning/Common/267DevDivSNKey2048.snk diff --git a/MachineLearning/Common/267DevDivSNKey2048.snk b/MachineLearning/Common/267DevDivSNKey2048.snk deleted file mode 100644 index 7a1fffb890a43a90273251a13141c1b92d5b2672..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 288 zcmV+*0pI=rBme*mfB*m#0RR970ssI2Bme+XQ$aBR2mk;90097DGnudPj`5axxzmR}d}v>#e4lBjR&}2Era&fE7}!jUWnTqBY9YFX4_ zm5IimTdxd3d8Bg@*(FuY0BX#OSy}SEZV=;O!$mit(q`I#g@t8<$X$Wje?Gt-*;Y(g z**$SQ&=)L@BcTrhIk*g#uy;1~*svm0Do%7I8ec;wBr%IMIi;Y7_~kt>6JMO9e><|_ z3jYf5gp~yQeCj{(lH85p?X6_Sj{W0IR?`BlRsC*&^8(fH&Fbk>WsNjaRvTY7#X|4f m-JghsIMZ~_@dm8-**Zi;!^b??5%5 Date: Fri, 31 Jan 2020 09:23:53 -0800 Subject: [PATCH 16/43] Update project files to newest QDK release. 
--- Chemistry/tests/ChemistryTests/QSharpTests.csproj | 2 +- Chemistry/tests/SystemTests/SystemTests.csproj | 2 +- MachineLearning/src/MachineLearning.csproj | 2 +- MachineLearning/tests/MachineLearningTests.csproj | 2 +- Numerics/src/Numerics.csproj | 4 ++-- Numerics/tests/NumericsTests.csproj | 2 +- Standard/src/Standard.csproj | 4 ++-- Standard/tests/Standard.Tests.csproj | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj index ef74163503e..9196080d248 100644 --- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj +++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj @@ -1,4 +1,4 @@ - + diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj index 7cce214df84..83477b28bb7 100644 --- a/Chemistry/tests/SystemTests/SystemTests.csproj +++ b/Chemistry/tests/SystemTests/SystemTests.csproj @@ -1,4 +1,4 @@ - + diff --git a/MachineLearning/src/MachineLearning.csproj b/MachineLearning/src/MachineLearning.csproj index 6239e0d5313..d9204dafa29 100644 --- a/MachineLearning/src/MachineLearning.csproj +++ b/MachineLearning/src/MachineLearning.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 x64 diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj index 794fb415906..5762a1a6054 100644 --- a/MachineLearning/tests/MachineLearningTests.csproj +++ b/MachineLearning/tests/MachineLearningTests.csproj @@ -1,4 +1,4 @@ - + diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj index f9a242f1e8d..c26ff7edddf 100644 --- a/Numerics/src/Numerics.csproj +++ b/Numerics/src/Numerics.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 @@ -37,7 +37,7 @@ - + diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj index 62542b6e1e0..a6ae4fb2aba 100644 --- a/Numerics/tests/NumericsTests.csproj +++ b/Numerics/tests/NumericsTests.csproj @@ -1,4 +1,4 @@ - + diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj index 78df63647d9..9c4d3cffb3a 100644 --- a/Standard/src/Standard.csproj +++ b/Standard/src/Standard.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 @@ -30,7 +30,7 @@ - + diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj index 84d317fdcb2..28689687eb9 100644 --- a/Standard/tests/Standard.Tests.csproj +++ b/Standard/tests/Standard.Tests.csproj @@ -1,4 +1,4 @@ - + From bf09e3e916878852df4e2f3173108395b5470277 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 09:58:56 -0800 Subject: [PATCH 17/43] One more project file update. --- Chemistry/src/Runtime/Runtime.csproj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Chemistry/src/Runtime/Runtime.csproj b/Chemistry/src/Runtime/Runtime.csproj index 8e8b3c09c51..329b463b03b 100644 --- a/Chemistry/src/Runtime/Runtime.csproj +++ b/Chemistry/src/Runtime/Runtime.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 @@ -16,7 +16,7 @@ - + From caee3d64593b655045b9c9e652fdc80e9b20978c Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 09:59:24 -0800 Subject: [PATCH 18/43] Added model structure to SequentialModel. 
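With this change a single SequentialModel value carries Structure, Parameters and Bias, so callers thread one model through training and classification and update it with the `w/` operator. A hedged sketch of the resulting call pattern, written against the call sites visible in the diff below; all names and values are illustrative.

```Q#
// Hedged sketch: warm-start training from an existing model, then score one
// sample, threading a single SequentialModel value throughout.
operation RetrainAndClassify(
    model : SequentialModel,
    freshParameters : Double[],
    samples : LabeledSample[],
    options : TrainingOptions,
    trainingSchedule : SamplingSchedule,
    sample : Double[]
) : Double {
    let warmStart = model
        w/ Parameters <- freshParameters
        w/ Bias <- 0.0;
    let trained = TrainSequentialClassifierAtModel(
        warmStart, samples, options, trainingSchedule, 1
    );
    return EstimateClassificationProbability(
        options::Tolerance, trained, sample, options::NMeasurements
    );
}
```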
--- MachineLearning/src/Classification.qs | 19 ++++----- MachineLearning/src/GradientEstimation.qs | 27 ++++++------ MachineLearning/src/Structure.qs | 39 +++++++++--------- MachineLearning/src/Training.qs | 50 +++++++++++------------ MachineLearning/src/Types.qs | 14 +++---- MachineLearning/src/Validation.qs | 3 +- 6 files changed, 71 insertions(+), 81 deletions(-) diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index 4f998eea008..a7456f6128a 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -10,13 +10,12 @@ namespace Microsoft.Quantum.MachineLearning { operation _PrepareClassification( encoder : (LittleEndian => Unit is Adj + Ctl), - structure : SequentialClassifierStructure, - parameters : Double[], + model : SequentialModel, target : Qubit[] ) : Unit is Adj { encoder(LittleEndian(target)); - ApplySequentialClassifier(structure, parameters, target); + ApplySequentialClassifier(model, target); } /// # Summary @@ -41,17 +40,16 @@ namespace Microsoft.Quantum.MachineLearning { /// An estimate of the classification probability for the given sample. operation EstimateClassificationProbability( tolerance : Double, - parameters : Double[], - structure : SequentialClassifierStructure, + model : SequentialModel, sample : Double[], nMeasurements: Int ) : Double { let nQubits = FeatureRegisterSize(sample); - let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(structure!)), sample); + let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(model::Structure)), sample); let encodedSample = StateGenerator(nQubits, circEnc); return 1.0 - EstimateFrequencyA( - _PrepareClassification(encodedSample::Apply, structure, parameters, _), + _PrepareClassification(encodedSample::Prepare, model, _), _TailMeasurement(encodedSample::NQubits), encodedSample::NQubits, nMeasurements @@ -81,16 +79,15 @@ namespace Microsoft.Quantum.MachineLearning { /// sample. 
operation EstimateClassificationProbabilities( tolerance : Double, - parameters : Double[], - structure : SequentialClassifierStructure, + model : SequentialModel, samples : Double[][], nMeasurements : Int ) : Double[] { - let effectiveTolerance = tolerance / IntAsDouble(Length(structure!)); + let effectiveTolerance = tolerance / IntAsDouble(Length(model::Structure)); return ForEach( EstimateClassificationProbability( - effectiveTolerance, parameters, structure, _, nMeasurements + effectiveTolerance, model, _, nMeasurements ), samples ); diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index 9843328dc25..049e34cbb89 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -19,15 +19,15 @@ namespace Microsoft.Quantum.MachineLearning { operation _EstimateDerivativeWithParameterShift( inputEncoder : StateGenerator, - structure : SequentialClassifierStructure, + model : SequentialModel, parameters : (Double[], Double[]), nQubits : Int, nMeasurements : Int ) : Double { return EstimateRealOverlapBetweenStates( - _ApplyLEOperationToRawRegister(inputEncoder::Apply, _), - ApplySequentialClassifier(structure, Fst(parameters), _), - ApplySequentialClassifier(structure, Snd(parameters), _), + _ApplyLEOperationToRawRegister(inputEncoder::Prepare, _), + ApplySequentialClassifier(model w/ Parameters <- Fst(parameters), _), + ApplySequentialClassifier(model w/ Parameters <- Snd(parameters), _), nQubits, nMeasurements ); } @@ -56,8 +56,7 @@ namespace Microsoft.Quantum.MachineLearning { /// This operation uses a Hadamard test and the parameter shift technique /// together to estimate the gradient. operation EstimateGradient( - structure : SequentialClassifierStructure, - param : Double[], + model : SequentialModel, sg : StateGenerator, nMeasurements : Int ) @@ -75,17 +74,17 @@ namespace Microsoft.Quantum.MachineLearning { // Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) // and we want a unitary description of its \theta-derivative. It can be written as // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} - mutable grad = ConstantArray(Length(param), 0.0); - let nQubits = MaxI(NQubitsRequired(structure), sg::NQubits); + mutable grad = ConstantArray(Length(model::Parameters), 0.0); + let nQubits = MaxI(NQubitsRequired(model), sg::NQubits); - for (gate in structure!) { - let paramShift = (param + [0.0]) + for (gate in model::Structure) { + let paramShift = (model::Parameters + [0.0]) // Shift the corresponding parameter. - w/ gate::Index <- (param[gate::Index] + PI()); + w/ gate::Index <- (model::Parameters[gate::Index] + PI()); // NB: This the *antiderivative* of the bracket let newDer = _EstimateDerivativeWithParameterShift( - sg, structure, (param, paramShift), nQubits, nMeasurements + sg, model, (model::Parameters, paramShift), nQubits, nMeasurements ); if (IsEmpty(gate::Span::ControlIndices)) { //uncontrolled gate @@ -93,11 +92,11 @@ namespace Microsoft.Quantum.MachineLearning { } else { //controlled gate let controlledShift = paramShift - w/ gate::Index <- (param[gate::Index] + 3.0 * PI()); + w/ gate::Index <- (model::Parameters[gate::Index] + 3.0 * PI()); // Assumption: any rotation R has the property that R(\theta + 2 Pi) = (-1) R(\theta). 
// NB: This the *antiderivative* of the bracket let newDer1 = _EstimateDerivativeWithParameterShift( - sg, structure, (param, controlledShift), nQubits, nMeasurements + sg, model, (model::Parameters, controlledShift), nQubits, nMeasurements ); set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1)); } diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 41823f43782..1e5fcd004aa 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -18,10 +18,10 @@ namespace Microsoft.Quantum.MachineLearning { /// # Output /// The minimum size of a register on which the sequential classifier /// may be applied. - function NQubitsRequired(structure : SequentialClassifierStructure) + function NQubitsRequired(model : SequentialModel) : Int { mutable nQubitsRequired = 0; - for (gate in structure!) { + for (gate in model::Structure) { set nQubitsRequired = Fold( MaxI, 0, gate::Span::ControlIndices + [ @@ -45,14 +45,13 @@ namespace Microsoft.Quantum.MachineLearning { /// ## qubits /// A target register to which the classifier should be applied. operation ApplySequentialClassifier( - structure : SequentialClassifierStructure, - parameters : Double[], + model : SequentialModel, qubits : Qubit[] ) : (Unit) is Adj + Ctl { - for (gate in structure!) { - if (gate::Index < Length(parameters)) { - let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); + for (gate in model::Structure) { + if (gate::Index < Length(model::Parameters)) { + let input = (gate::Axis, model::Parameters[gate::Index], qubits[gate::Span::TargetIndex]); if (IsEmpty(gate::Span::ControlIndices)) { // Uncontrolled rotation of target R(input); @@ -84,28 +83,28 @@ namespace Microsoft.Quantum.MachineLearning { return _CallFlipped(fn, _, _); } - function LocalRotationsLayer(nQubits : Int, axis : Pauli) : SequentialClassifierStructure { + function LocalRotationsLayer(nQubits : Int, axis : Pauli) : ControlledRotation[] { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - return SequentialClassifierStructure(Mapped( + return Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( _UncontrolledSpanSequence(SequenceI(0, nQubits - 1)) ) - )); + ); } - function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : SequentialClassifierStructure { + function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : ControlledRotation[] { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - return SequentialClassifierStructure(Mapped( + return Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( _UncontrolledSpanSequence(idxsQubits) ) - )); + ); } - function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : SequentialClassifierStructure { + function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : ControlledRotation[] { mutable rotations = new ControlledRotation[0]; for (idxTarget in 0..nQubits - 1) { set rotations += [ControlledRotation( @@ -116,19 +115,19 @@ namespace Microsoft.Quantum.MachineLearning { axis, idxTarget )]; } - return SequentialClassifierStructure(rotations); + return rotations; } - function CombinedStructure(layers : SequentialClassifierStructure[]) : SequentialClassifierStructure { - mutable combined = (Head(layers))!; + function CombinedStructure(layers : ControlledRotation[][]) : ControlledRotation[] { + mutable combined = Head(layers); mutable offset = Length(combined); for (layer in Rest(layers)) { - for (gate in layer!) 
{ + for (gate in layer) { set combined += [gate w/ Index <- gate::Index + offset]; } - set offset += Length(layer!); + set offset += Length(layer); } - return SequentialClassifierStructure(combined); + return combined; } } \ No newline at end of file diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index ac0fa94eb52..939c5bcf0a3 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -79,29 +79,28 @@ namespace Microsoft.Quantum.MachineLearning { /// - Microsoft.Quantum.MachineLearning.TrainSequentialClassifierAtModel /// - Microsoft.Quantum.MachineLearning.ValidateSequentialClassifier operation TrainSequentialClassifier( - structure : SequentialClassifierStructure, - parameterSource : Double[][], + models : SequentialModel[], samples : LabeledSample[], options : TrainingOptions, trainingSchedule : SamplingSchedule, validationSchedule : SamplingSchedule ) : SequentialModel { - mutable bestSoFar = SequentialModel([-1E12], -2.0); + mutable bestSoFar = Default() + w/ Structure <- (Head(models))::Structure; mutable bestValidation = Length(samples) + 1; let features = Mapped(_Features, samples); let labels = Mapped(_Label, samples); - for ((idxStart, parameters) in Enumerated(parameterSource)) { - Message($"Beginning training at start point #{idxStart}..."); + for ((idxModel, model) in Enumerated(models)) { + Message($"Beginning training at start point #{idxModel}..."); let proposedUpdate = TrainSequentialClassifierAtModel( - structure, SequentialModel(parameters, 0.0), + model, samples, options, trainingSchedule, 1 ); let probabilities = EstimateClassificationProbabilities( options::Tolerance, - proposedUpdate::Parameters, - structure, + proposedUpdate, Sampled(validationSchedule, features), options::NMeasurements ); @@ -147,12 +146,12 @@ namespace Microsoft.Quantum.MachineLearning { operation _RunSingleTrainingStep( miniBatch : LabeledSample[], options : TrainingOptions, - param : Double[], gates : SequentialClassifierStructure + model : SequentialModel ) - : (Double, Double[]) { - mutable batchGradient = ConstantArray(Length(param), 0.0); - let nQubits = MaxI(FeatureRegisterSize(miniBatch[0]::Features), NQubitsRequired(gates)); - let effectiveTolerance = options::Tolerance / IntAsDouble(Length(gates!)); + : (Double, SequentialModel) { + mutable batchGradient = ConstantArray(Length(model::Parameters), 0.0); + let nQubits = MaxI(FeatureRegisterSize(miniBatch[0]::Features), NQubitsRequired(model)); + let effectiveTolerance = options::Tolerance / IntAsDouble(Length(model::Structure)); for (sample in miniBatch) { mutable err = IntAsDouble(sample::Label); @@ -164,18 +163,18 @@ namespace Microsoft.Quantum.MachineLearning { ApproximateInputEncoder(effectiveTolerance, sample::Features) ); let grad = EstimateGradient( - gates, param, stateGenerator, + model, stateGenerator, options::NMeasurements ); - for (ip in 0..(Length(param) - 1)) { + for (ip in 0..(Length(model::Parameters) - 1)) { // GradientClassicalSample actually computes antigradient, but err*grad corrects it back to gradient set batchGradient w/= ip <- (batchGradient[ip] + options::LearningRate * err * grad[ip]); } } - let updatedParameters = Mapped(PlusD, Zip(param, batchGradient)); + let updatedParameters = Mapped(PlusD, Zip(model::Parameters, batchGradient)); // TODO:REVIEW: Ok to interpret utility as size of the overall move? 
- return (SquaredNorm(batchGradient), updatedParameters); + return (SquaredNorm(batchGradient), model w/ Parameters <- updatedParameters); } @@ -211,7 +210,6 @@ namespace Microsoft.Quantum.MachineLearning { samples : LabeledSample[], schedule : SamplingSchedule, periodScore: Int, options : TrainingOptions, - structure : SequentialClassifierStructure, model : SequentialModel, nPreviousBestMisses : Int ) @@ -224,7 +222,7 @@ namespace Microsoft.Quantum.MachineLearning { let inferredLabels = InferredLabels( model::Bias, EstimateClassificationProbabilities( - options::Tolerance, model::Parameters, structure, + options::Tolerance, model, features, options::NMeasurements ) ); @@ -238,15 +236,15 @@ namespace Microsoft.Quantum.MachineLearning { ) ); for (minibatch in minibatches) { - let (utility, updatedParameters) = _RunSingleTrainingStep( - minibatch, options, bestSoFar::Parameters, structure + let (utility, updatedModel) = _RunSingleTrainingStep( + minibatch, options, bestSoFar ); if (utility > 0.0000001) { // There has been some good parameter update. // Check if it actually improves things, and if so, // commit it. let probabilities = EstimateClassificationProbabilities( - options::Tolerance, updatedParameters, structure, + options::Tolerance, updatedModel, features, options::NMeasurements ); let updatedBias = _UpdatedBias( @@ -260,7 +258,7 @@ namespace Microsoft.Quantum.MachineLearning { )); if (nMisses < nBestMisses) { set nBestMisses = nMisses; - set bestSoFar = SequentialModel(updatedParameters, updatedBias); + set bestSoFar = updatedModel; } } @@ -308,7 +306,6 @@ namespace Microsoft.Quantum.MachineLearning { /// - Microsoft.Quantum.MachineLearning.TrainSequentialClassifier /// - Microsoft.Quantum.MachineLearning.ValidateSequentialClassifier operation TrainSequentialClassifierAtModel( - gates : SequentialClassifierStructure, model : SequentialModel, samples : LabeledSample[], options : TrainingOptions, @@ -320,7 +317,7 @@ namespace Microsoft.Quantum.MachineLearning { let features = Mapped(_Features, samples); let actualLabels = Mapped(_Label, samples); let probabilities = EstimateClassificationProbabilities( - options::Tolerance, model::Parameters, gates, + options::Tolerance, model, features, options::NMeasurements ); mutable bestSoFar = model @@ -349,7 +346,7 @@ namespace Microsoft.Quantum.MachineLearning { options w/ LearningRate <- lrate w/ MinibatchSize <- batchSize, - gates, current, + current, nBestMisses ); if (nMisses < nBestMisses) { @@ -382,6 +379,7 @@ namespace Microsoft.Quantum.MachineLearning { // and bias before updating. 
if (nStalls > options::MaxStalls / 2) { set current = SequentialModel( + model::Structure, ForEach(_RandomlyRescale(options::StochasticRescaleFactor, _), proposedUpdate::Parameters), _RandomlyRescale(options::StochasticRescaleFactor, proposedUpdate::Bias) ); diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 3174cad8db5..219dd9f4a25 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -19,15 +19,18 @@ namespace Microsoft.Quantum.MachineLearning { Index: Int ); - /// Abstraction for sequence of gates - newtype SequentialClassifierStructure = ControlledRotation[]; + newtype SequentialModel = ( + Structure: ControlledRotation[], + Parameters: Double[], + Bias: Double + ); /// Abstraction for state preparation /// Fst(StateGenerator) is the number of qubits /// Snd(Stategenerator) is a circuit to prepare subject state newtype StateGenerator = ( NQubits: Int, - Apply: (LittleEndian => Unit is Adj + Ctl) + Prepare: (LittleEndian => Unit is Adj + Ctl) ); /// Convention: negative Snd(labledSample) signifies the last sample in a batch @@ -119,9 +122,4 @@ namespace Microsoft.Quantum.MachineLearning { ); } - newtype SequentialModel = ( - Parameters: Double[], - Bias: Double - ); - } diff --git a/MachineLearning/src/Validation.qs b/MachineLearning/src/Validation.qs index 99eefbc8273..afb7896e577 100644 --- a/MachineLearning/src/Validation.qs +++ b/MachineLearning/src/Validation.qs @@ -55,7 +55,6 @@ namespace Microsoft.Quantum.MachineLearning { } operation ValidateSequentialClassifier( - gates: SequentialClassifierStructure, model : SequentialModel, samples : LabeledSample[], tolerance: Double, @@ -66,7 +65,7 @@ namespace Microsoft.Quantum.MachineLearning { let features = Mapped(_Features, samples); let labels = Sampled(validationSchedule, Mapped(_Label, samples)); let probabilities = EstimateClassificationProbabilities( - tolerance, model::Parameters, gates, + tolerance, model, Sampled(validationSchedule, features), nMeasurements ); let localPL = InferredLabels(model::Bias, probabilities); From b0da363a38b1b8cc356f00068324082d4e2a1144 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 10:24:30 -0800 Subject: [PATCH 19/43] Combined GateSpan and ControlledRotation. 
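[Editor's note on the refactoring this commit message describes; the snippet is illustrative and not part of the patch.] Once `GateSpan` is folded into `ControlledRotation`, the target/control pair becomes an anonymous inner tuple, and its named items remain accessible on the outer type, as the hunks below show. The namespace and function name here are assumptions.

```Q#
namespace Example {
    open Microsoft.Quantum.MachineLearning;

    // Illustrative only: construct the merged UDT and read back its named items.
    function MergedControlledRotationExample() : (Int, Int[]) {
        // (TargetIndex, ControlIndices) is now an anonymous inner tuple.
        let gate = ControlledRotation((0, [1]), PauliX, 3);
        // Named items remain reachable without a separate GateSpan value.
        return (gate::TargetIndex, gate::ControlIndices);   // (0, [1])
    }
}
```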
--- MachineLearning/src/GradientEstimation.qs | 2 +- MachineLearning/src/Structure.qs | 20 ++++++++++---------- MachineLearning/src/Training.qs | 6 ++++-- MachineLearning/src/Types.qs | 11 ++++------- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index 049e34cbb89..1a94e8c264e 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -86,7 +86,7 @@ namespace Microsoft.Quantum.MachineLearning { let newDer = _EstimateDerivativeWithParameterShift( sg, model, (model::Parameters, paramShift), nQubits, nMeasurements ); - if (IsEmpty(gate::Span::ControlIndices)) { + if (IsEmpty(gate::ControlIndices)) { //uncontrolled gate set grad w/= gate::Index <- grad[gate::Index] + newDer; } else { diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 1e5fcd004aa..5ab0486be02 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -24,8 +24,8 @@ namespace Microsoft.Quantum.MachineLearning { for (gate in model::Structure) { set nQubitsRequired = Fold( MaxI, 0, - gate::Span::ControlIndices + [ - gate::Span::TargetIndex, + gate::ControlIndices + [ + gate::TargetIndex, nQubitsRequired ] ); @@ -51,22 +51,22 @@ namespace Microsoft.Quantum.MachineLearning { : (Unit) is Adj + Ctl { for (gate in model::Structure) { if (gate::Index < Length(model::Parameters)) { - let input = (gate::Axis, model::Parameters[gate::Index], qubits[gate::Span::TargetIndex]); - if (IsEmpty(gate::Span::ControlIndices)) { + let input = (gate::Axis, model::Parameters[gate::Index], qubits[gate::TargetIndex]); + if (IsEmpty(gate::ControlIndices)) { // Uncontrolled rotation of target R(input); } else { //TODO: should one validate the control indices first? - (Controlled R)(Subarray(gate::Span::ControlIndices, qubits), input); + (Controlled R)(Subarray(gate::ControlIndices, qubits), input); } } } } - function _UncontrolledSpanSequence(idxsQubits : Int[]) : GateSpan[] { - return Mapped( - GateSpan(_, new Int[0]), - idxsQubits + function _UncontrolledSpanSequence(idxsQubits : Int[]) : (Int, Int[])[] { + return Zip( + idxsQubits, + ConstantArray(Length(idxsQubits), new Int[0]) ); } @@ -108,7 +108,7 @@ namespace Microsoft.Quantum.MachineLearning { mutable rotations = new ControlledRotation[0]; for (idxTarget in 0..nQubits - 1) { set rotations += [ControlledRotation( - GateSpan( + ( idxTarget, [(idxTarget + stride) % nQubits] ), diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index 939c5bcf0a3..22db3cf1aba 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -172,9 +172,11 @@ namespace Microsoft.Quantum.MachineLearning { } } - let updatedParameters = Mapped(PlusD, Zip(model::Parameters, batchGradient)); // TODO:REVIEW: Ok to interpret utility as size of the overall move? 
- return (SquaredNorm(batchGradient), model w/ Parameters <- updatedParameters); + return ( + SquaredNorm(batchGradient), + model w/ Parameters <- Mapped(PlusD, Zip(model::Parameters, batchGradient)) + ); } diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 219dd9f4a25..3c2f18b0405 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -5,16 +5,13 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Arithmetic; - /// Qubit span of a multicontrolled single-qubit gate - newtype GateSpan = ( - TargetIndex: Int, - ControlIndices: Int[] - ); - /// One-parameter controlled rotation gate triplet: /// (control structure, rotation axis, index of the rotation parameter) newtype ControlledRotation = ( - Span: GateSpan, + ( + TargetIndex: Int, + ControlIndices: Int[] + ), Axis: Pauli, Index: Int ); From 5590c97fd9cfbebe0c64eec3d6968e852674633e Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 11:21:12 -0800 Subject: [PATCH 20/43] =?UTF-8?q?Types.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/GradientEstimation.qs | 8 +- MachineLearning/src/Structure.qs | 6 +- MachineLearning/src/Types.qs | 122 ++++++++++++++++++++-- 3 files changed, 118 insertions(+), 18 deletions(-) diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index 1a94e8c264e..5008b191e27 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -80,7 +80,7 @@ namespace Microsoft.Quantum.MachineLearning { for (gate in model::Structure) { let paramShift = (model::Parameters + [0.0]) // Shift the corresponding parameter. - w/ gate::Index <- (model::Parameters[gate::Index] + PI()); + w/ gate::ParameterIndex <- (model::Parameters[gate::ParameterIndex] + PI()); // NB: This the *antiderivative* of the bracket let newDer = _EstimateDerivativeWithParameterShift( @@ -88,17 +88,17 @@ namespace Microsoft.Quantum.MachineLearning { ); if (IsEmpty(gate::ControlIndices)) { //uncontrolled gate - set grad w/= gate::Index <- grad[gate::Index] + newDer; + set grad w/= gate::ParameterIndex <- grad[gate::ParameterIndex] + newDer; } else { //controlled gate let controlledShift = paramShift - w/ gate::Index <- (model::Parameters[gate::Index] + 3.0 * PI()); + w/ gate::ParameterIndex <- (model::Parameters[gate::ParameterIndex] + 3.0 * PI()); // Assumption: any rotation R has the property that R(\theta + 2 Pi) = (-1) R(\theta). 
// NB: This the *antiderivative* of the bracket let newDer1 = _EstimateDerivativeWithParameterShift( sg, model, (model::Parameters, controlledShift), nQubits, nMeasurements ); - set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1)); + set grad w/= gate::ParameterIndex <- (grad[gate::ParameterIndex] + 0.5 * (newDer - newDer1)); } } return grad; diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 5ab0486be02..6606f51940c 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -50,8 +50,8 @@ namespace Microsoft.Quantum.MachineLearning { ) : (Unit) is Adj + Ctl { for (gate in model::Structure) { - if (gate::Index < Length(model::Parameters)) { - let input = (gate::Axis, model::Parameters[gate::Index], qubits[gate::TargetIndex]); + if (gate::ParameterIndex < Length(model::Parameters)) { + let input = (gate::Axis, model::Parameters[gate::ParameterIndex], qubits[gate::TargetIndex]); if (IsEmpty(gate::ControlIndices)) { // Uncontrolled rotation of target R(input); @@ -123,7 +123,7 @@ namespace Microsoft.Quantum.MachineLearning { mutable offset = Length(combined); for (layer in Rest(layers)) { for (gate in layer) { - set combined += [gate w/ Index <- gate::Index + offset]; + set combined += [gate w/ ParameterIndex <- gate::ParameterIndex + offset]; } set offset += Length(layer); } diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 3c2f18b0405..23d862ca3b1 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -5,46 +5,104 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Arithmetic; - /// One-parameter controlled rotation gate triplet: - /// (control structure, rotation axis, index of the rotation parameter) + /// # Summary + /// Describes a controlled rotation in terms of its target and control + /// indices, rotation axis, and index into a model parameter vector. + /// + /// # Input + /// ## TargetIndex + /// Index of the target qubit for this controlled rotation. + /// ## ControlIndices + /// An array of the control qubit indices for this rotation. + /// ## Axis + /// The axis for this rotation. + /// ## ParameterIndex + /// An index into a model parameter vector describing the angle + /// for this rotation. + /// + /// # Remarks + /// An uncontrolled rotation can be represented by setting `ControlIndices` + /// to an empty array of indexes, `new Int[0]`. + /// + /// # Example + /// The following represents a rotation about the $X$-axis of the first + /// qubit in a register, controlled on the second qubit, and with an + /// angle given by the fourth parameter in a sequential model: + /// ```Q# + /// let controlledRotation = ControlledRotation( + /// (0, [1]), + /// PauliX, + /// 3 + /// ) + /// ``` newtype ControlledRotation = ( ( TargetIndex: Int, ControlIndices: Int[] ), Axis: Pauli, - Index: Int + ParameterIndex: Int ); + /// # Summary + /// Describes a quantum classifier model comprised of a sequence of + /// parameterized and controlled rotations, an assignment of rotation + /// angles, and a bias between the two classes recognized by the model. + /// + /// # Input + /// ## Structure + /// The sequence of controlled rotations used to classify inputs. + /// ## Parameters + /// An assignment of rotation angles to the given classification structure. + /// ## Bias + /// The bias between the two classes recognized by this classifier. 
+ /// + /// # References + /// - [arXiv:1804.00633](https://arxiv.org/abs/1804.00633) newtype SequentialModel = ( Structure: ControlledRotation[], Parameters: Double[], Bias: Double ); - /// Abstraction for state preparation - /// Fst(StateGenerator) is the number of qubits - /// Snd(Stategenerator) is a circuit to prepare subject state + /// # Summary + /// Describes an operation that prepares a given input to a sequential + /// classifier. + /// + /// # Input + /// ## NQubits + /// The nubmer of qubits on which the encoded input is defined. + /// ## Prepare + /// An operation which prepares the encoded input on a little-endian + /// register of `NQubits` qubits. newtype StateGenerator = ( NQubits: Int, Prepare: (LittleEndian => Unit is Adj + Ctl) ); - /// Convention: negative Snd(labledSample) signifies the last sample in a batch + /// # Summary + /// A sample, labeled with a class to which that sample belongs. + /// + /// # Input + /// ## Features + /// A vector of features for the given sample. + /// ## Label + /// An integer label for the class to which this sample belongs. newtype LabeledSample = ( Features: Double[], Label: Int ); + /// # Summary + /// A schedule for drawing batches from a set of samples. + newtype SamplingSchedule = Range[]; + // Here, we define a couple private accessor functions for LabeledSample, - // in lieu of having lambda support. These should not be used in external + // in lieu of having lambda support. These SHOULD NOT be used in external // code. function _Features(sample : LabeledSample) : Double[] { return sample::Features; } function _Label(sample : LabeledSample) : Int { return sample::Label; } - /// Abstraction for a two-level range of indices - newtype SamplingSchedule = Range[]; - /// # Summary /// Returns the number of elements in a given sampling schedule. /// @@ -85,10 +143,52 @@ namespace Microsoft.Quantum.MachineLearning { return sampled; } + /// # Summary + /// The results from having validated a classifier against a set of + /// samples. + /// + /// # Input + /// ## NMisclassifications + /// The number of misclassifications observed during validation. newtype ValidationResults = ( NMisclassifications: Int ); + /// # Summary + /// A collection of options to be used in training quantum classifiers. + /// + /// # Input + /// ## LearningRate + /// The learning rate by which gradients should be rescaled when updating + /// model parameters during training steps. + /// ## Tolerance + /// The approximation tolerance to use when preparing samples as quantum + /// states. + /// ## MinibatchSize + /// The number of samples to use in each training minibatch. + /// ## NMeasurements + /// The number of times to measure each classification result in order to + /// estimate the classification probability. + /// ## MaxEpochs + /// The maximum number of epochs to train each model for. + /// ## MaxStalls + /// The maximum number of times a training epoch is allowed to stall + /// (approximately zero gradient) before failing. + /// ## StochasticRescaleFactor + /// The amount to rescale stalled models by before retrying an update. + /// + /// # Remarks + /// This UDT should not be created directly, but rather should be specified + /// by calling @"microsoft.quantum.machinelearning.defaulttrainingoptions" + /// and then using the `w/` operator to override different defaults. 
+ /// + /// For example, to use 100,000 measurements: + /// ```Q# + /// let options = DefaultTrainingOptions() w/ NMeasurements <- 100000; + /// ``` + /// + /// # References + /// - [arXiv:1804.00633](https://arxiv.org/abs/1804.00633) newtype TrainingOptions = ( LearningRate: Double, Tolerance: Double, From c3889e37bcc8453be9e605e8a92490cb14256814 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 11:22:46 -0800 Subject: [PATCH 21/43] =?UTF-8?q?Classification.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/Classification.qs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index a7456f6128a..755d41de77b 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -27,10 +27,9 @@ namespace Microsoft.Quantum.MachineLearning { /// ## tolerance /// The tolerance to allow in encoding the sample into a state preparation /// operation. - /// ## parameters - /// A parameterization of the given sequential classifier. - /// ## structure - /// The structure of the given sequential classifier. + /// ## model + /// The sequential model to be used to estimate the classification + /// probability for the given sample. /// ## sample /// The feature vector for the sample to be classified. /// ## nMeasurements @@ -65,10 +64,9 @@ namespace Microsoft.Quantum.MachineLearning { /// ## tolerance /// The tolerance to allow in encoding the sample into a state preparation /// operation. - /// ## parameters - /// A parameterization of the given sequential classifier. - /// ## structure - /// The structure of the given sequential classifier. + /// ## model + /// The sequential model to be used to estimate the classification + /// probabilities for the given samples. /// ## samples /// An array of feature vectors for each sample to be classified. /// ## nMeasurements From a158b39ef92a920fdd53feb3807fd40fe376d47b Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 11:26:38 -0800 Subject: [PATCH 22/43] =?UTF-8?q?GradientEstimation.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/GradientEstimation.qs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index 5008b191e27..767b161d4b8 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -34,15 +34,12 @@ namespace Microsoft.Quantum.MachineLearning { /// # Summary /// Estimates the training gradient for a sequential classifier at a - /// particular set of parameters and for a given encoded input. + /// particular model and for a given encoded input. /// /// # Input - /// ## structure - /// The structure of the sequential classifier as a sequence of quantum - /// operations. - /// ## param - /// A set of parameters for the given classifier structure. - /// ## sg + /// ## model + /// The sequential model whose gradient is to be estimated. + /// ## encodedInput /// An input to the sequential classifier, encoded into a state preparation /// operation. /// ## nMeasurements @@ -57,7 +54,7 @@ namespace Microsoft.Quantum.MachineLearning { /// together to estimate the gradient. 
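[Editor's note, not part of the patch.] To make the renamed `encodedInput` argument concrete, here is a hypothetical call site for the operation defined just below. The wrapper name and shot count are assumptions; `SequentialModel`, `StateGenerator`, and `EstimateGradient` are the library names shown in this hunk.

```Q#
namespace Example {
    open Microsoft.Quantum.MachineLearning;

    // Returns one gradient estimate per model parameter for a single
    // pre-encoded training sample.
    operation EstimateGradientForSample(
        model : SequentialModel,
        encodedInput : StateGenerator
    ) : Double[] {
        return EstimateGradient(model, encodedInput, 10000);
    }
}
```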
operation EstimateGradient( model : SequentialModel, - sg : StateGenerator, + encodedInput : StateGenerator, nMeasurements : Int ) : (Double[]) { @@ -75,7 +72,7 @@ namespace Microsoft.Quantum.MachineLearning { // and we want a unitary description of its \theta-derivative. It can be written as // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} mutable grad = ConstantArray(Length(model::Parameters), 0.0); - let nQubits = MaxI(NQubitsRequired(model), sg::NQubits); + let nQubits = MaxI(NQubitsRequired(model), encodedInput::NQubits); for (gate in model::Structure) { let paramShift = (model::Parameters + [0.0]) @@ -84,7 +81,7 @@ namespace Microsoft.Quantum.MachineLearning { // NB: This the *antiderivative* of the bracket let newDer = _EstimateDerivativeWithParameterShift( - sg, model, (model::Parameters, paramShift), nQubits, nMeasurements + encodedInput, model, (model::Parameters, paramShift), nQubits, nMeasurements ); if (IsEmpty(gate::ControlIndices)) { //uncontrolled gate @@ -96,7 +93,7 @@ namespace Microsoft.Quantum.MachineLearning { // Assumption: any rotation R has the property that R(\theta + 2 Pi) = (-1) R(\theta). // NB: This the *antiderivative* of the bracket let newDer1 = _EstimateDerivativeWithParameterShift( - sg, model, (model::Parameters, controlledShift), nQubits, nMeasurements + encodedInput, model, (model::Parameters, controlledShift), nQubits, nMeasurements ); set grad w/= gate::ParameterIndex <- (grad[gate::ParameterIndex] + 0.5 * (newDer - newDer1)); } From a1771fbe9040e17506a18562f2e7706ec8682aa8 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 11:43:53 -0800 Subject: [PATCH 23/43] =?UTF-8?q?InputEncoding.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/Classification.qs | 4 +- MachineLearning/src/InputEncoding.qs | 52 +++++++++++++++------ MachineLearning/src/Training.qs | 8 ++-- MachineLearning/tests/InputEncodingTests.qs | 2 +- 4 files changed, 45 insertions(+), 21 deletions(-) diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index 755d41de77b..0118979e150 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -44,9 +44,7 @@ namespace Microsoft.Quantum.MachineLearning { nMeasurements: Int ) : Double { - let nQubits = FeatureRegisterSize(sample); - let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(model::Structure)), sample); - let encodedSample = StateGenerator(nQubits, circEnc); + let encodedSample = ApproximateInputEncoder(tolerance / IntAsDouble(Length(model::Structure)), sample); return 1.0 - EstimateFrequencyA( _PrepareClassification(encodedSample::Prepare, model, _), _TailMeasurement(encodedSample::NQubits), diff --git a/MachineLearning/src/InputEncoding.qs b/MachineLearning/src/InputEncoding.qs index d997987ba19..62adda62358 100644 --- a/MachineLearning/src/InputEncoding.qs +++ b/MachineLearning/src/InputEncoding.qs @@ -86,13 +86,16 @@ namespace Microsoft.Quantum.MachineLearning { /// /// # Input /// ## tolerance - /// // TODO + /// The approximation tolerance to be used in encoding the given + /// coefficients into an input state. /// ## coefficients - /// // TODO + /// The coefficients to be encoded into an input state. 
+ /// /// # Output - /// // TODO + /// A state preparation operation that prepares the given coefficients + /// as an input state on a given register. function ApproximateInputEncoder(tolerance : Double, coefficients : Double[]) - : (LittleEndian => Unit is Adj + Ctl) { + : StateGenerator { //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 let nCoefficients = Length(coefficients); mutable complexCoefficients = new ComplexPolar[Length(coefficients)]; @@ -113,24 +116,43 @@ namespace Microsoft.Quantum.MachineLearning { // Check if we can apply the explicit two-qubit case. if (_CanApplyTwoQubitCase(coefficients)) { - return _ApplyTwoQubitCase(coefficients, _); + return StateGenerator(2, _ApplyTwoQubitCase(coefficients, _)); } + + let nQubits = FeatureRegisterSize(coefficients); + // If not, we may be able to use a special protocol in the case that // there are only a few negative coefficients. // Here, by a "few," we mean fewer than the number of qubits required // to encode features. if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) { - return _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _); + return StateGenerator( + nQubits, + _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _) + ); } // Finally, we fall back to arbitrary state preparation. - return ApproximatelyPrepareArbitraryState(tolerance, complexCoefficients, _); - } //EncodeNoisyInput + return StateGenerator( + nQubits, + ApproximatelyPrepareArbitraryState(tolerance, complexCoefficients, _) + ); + } - /// Create amplitude encoding of an array of real-valued coefficients - /// The vector of 'coefficients' does not have to be unitary + /// # Summary + /// Given a set of coefficients and a tolerance, returns a state preparation + /// operation that prepares each coefficient as the corresponding amplitude + /// of a computational basis state. + /// + /// # Input + /// ## coefficients + /// The coefficients to be encoded into an input state. + /// + /// # Output + /// A state preparation operation that prepares the given coefficients + /// as an input state on a given register. 
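[Editor's note, not part of the patch.] Since both encoders now return a `StateGenerator` rather than a bare operation, a short sketch of how a caller might consume the new type; the coefficient values and operation name are assumptions.

```Q#
namespace Example {
    open Microsoft.Quantum.Arithmetic;
    open Microsoft.Quantum.Intrinsic;
    open Microsoft.Quantum.MachineLearning;

    operation PrepareEncodedInput() : Unit {
        // The generator carries both the register size and the preparation routine.
        let encoder = InputEncoder([0.5, 0.5, 0.5, 0.5]);
        using (register = Qubit[encoder::NQubits]) {
            encoder::Prepare(LittleEndian(register));
            // ... classify or measure here ...
            ResetAll(register);
        }
    }
}
```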
function InputEncoder(coefficients : Double[]) - : (LittleEndian => Unit is Adj + Ctl) { + : StateGenerator { //default implementation, does not respect sparcity mutable complexCoefficients = new ComplexPolar[Length(coefficients)]; for ((idx, coefficient) in Enumerated(coefficients)) { @@ -141,9 +163,13 @@ namespace Microsoft.Quantum.MachineLearning { ); } if (_CanApplyTwoQubitCase(coefficients)) { - return _ApplyTwoQubitCase(coefficients, _); + return StateGenerator(2, _ApplyTwoQubitCase(coefficients, _)); } - return ApproximatelyPrepareArbitraryState(1E-12, complexCoefficients, _); //this is preparing the state almost exactly so far + //this is preparing the state almost exactly so far + return StateGenerator( + FeatureRegisterSize(coefficients), + ApproximatelyPrepareArbitraryState(1E-12, complexCoefficients, _) + ); } } diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index 22db3cf1aba..38ef2f313b1 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -158,10 +158,10 @@ namespace Microsoft.Quantum.MachineLearning { if (err < 1.0) { set err = -1.0; //class 0 misclassified to class 1; strive to reduce the probability } - let stateGenerator = StateGenerator( - nQubits, - ApproximateInputEncoder(effectiveTolerance, sample::Features) - ); + let stateGenerator = ApproximateInputEncoder(effectiveTolerance, sample::Features) + // Force the number of qubits in case something else in the + // minibatch requires a larger register. + w/ NQubits <- nQubits; let grad = EstimateGradient( model, stateGenerator, options::NMeasurements diff --git a/MachineLearning/tests/InputEncodingTests.qs b/MachineLearning/tests/InputEncodingTests.qs index 692901bb343..86b75ec450b 100644 --- a/MachineLearning/tests/InputEncodingTests.qs +++ b/MachineLearning/tests/InputEncodingTests.qs @@ -21,7 +21,7 @@ namespace Microsoft.Quantum.MachineLearning.Tests { let encoder = InputEncoder(coefficients); AssertOperationsEqualReferenced(2, _ApplyToBareRegister(PrepareArbitraryState(Mapped(ComplexPolar(_, 0.0), coefficients), _), _), - _ApplyToBareRegister(encoder, _) + _ApplyToBareRegister(encoder::Prepare, _) ); } From 8a3188a29aaef237ce51177564b853f00104d26f Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 12:16:25 -0800 Subject: [PATCH 24/43] =?UTF-8?q?Structure.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/Structure.qs | 82 ++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 6606f51940c..53d5d0b0d46 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -83,6 +83,20 @@ namespace Microsoft.Quantum.MachineLearning { return _CallFlipped(fn, _, _); } + /// # Summary + /// Returns an array of uncontrolled (single-qubit) rotations along a given + /// axis, with one rotation for each qubit in a register, parameterized by + /// distinct model parameters. + /// + /// # Input + /// ## nQubits + /// The number of qubits acted on by the given layer. + /// ## axis + /// The rotation axis for each rotation in the given layer. + /// + /// # Output + /// An array of controlled rotations about the given axis, one on each of + /// `nQubits` qubits. 
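[Editor's note, not part of the patch.] In the same spirit as the `# Example` sections added for `CyclicEntanglingLayer` and `CombinedStructure` further down in this patch, an illustrative expansion of the layer defined just below; the two-qubit case is chosen arbitrarily.

```Q#
// The following are equivalent:
let layer = LocalRotationsLayer(2, PauliY);
let layer = [
    ControlledRotation((0, new Int[0]), PauliY, 0),
    ControlledRotation((1, new Int[0]), PauliY, 1)
];
```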
function LocalRotationsLayer(nQubits : Int, axis : Pauli) : ControlledRotation[] { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] return Mapped( @@ -94,6 +108,19 @@ namespace Microsoft.Quantum.MachineLearning { } + /// # Summary + /// Returns an array of uncontrolled (single-qubit) rotations along a given + /// axis, parameterized by distinct model parameters. + /// + /// # Input + /// ## idxsQubits + /// Indices for the qubits to be used as the targets for each rotation. + /// ## axis + /// The rotation axis for each rotation in the given layer. + /// + /// # Output + /// An array of controlled rotations about the given axis, one on each of + /// `nQubits` qubits. function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : ControlledRotation[] { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] return Mapped( @@ -104,6 +131,33 @@ namespace Microsoft.Quantum.MachineLearning { ); } + /// # Summary + /// Returns an array of singly controlled rotations along a given axis, + /// arranged cyclically across a register of qubits, and parameterized by + /// distinct model parameters. + /// + /// # Input + /// ## nQubits + /// The number of qubits acted on by the given layer. + /// ## axis + /// The rotation axis for each rotation in the given layer. + /// ## stride + /// The separation between the target and control indices for each rotation. + /// + /// # Output + /// An array of two-qubit controlled rotations laid out cyclically across + /// a register of `nQubits` qubits. + /// + /// # Example + /// The following are equivalent: + /// ```Q# + /// let layer = CyclicEntanglingLayer(3, PauliX, 2); + /// let layer = [ + /// ControlledRotation((0, [2]), PauliX, 0), + /// ControlledRotation((1, [0]), PauliX, 1), + /// ControlledRotation((2, [1]), PauliX, 2) + /// ]; + /// ``` function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : ControlledRotation[] { mutable rotations = new ControlledRotation[0]; for (idxTarget in 0..nQubits - 1) { @@ -118,6 +172,34 @@ namespace Microsoft.Quantum.MachineLearning { return rotations; } + /// # Summary + /// Given one or more layers of controlled rotations, returns a single + /// layer with model parameter index shifted such that distinct layers + /// are parameterized by distinct model parameters. + /// + /// # Input + /// ## layers + /// The layers to be combined. + /// + /// # Output + /// A single layer of controlled rotations, representing the concatenation + /// of all other layers. 
+ /// + /// # Example + /// The following are equivalent: + /// ```Q# + /// let structure = CombinedStructure([ + /// LocalRotationLayer(2, PauliY), + /// CyclicEntanglingLayer(3, PauliX, 2) + /// ]); + /// let structure = [ + /// ControlledRotation((0, new Int[0]), PauliY, 0), + /// ControlledRotation((1, new Int[0]), PauliY, 1), + /// ControlledRotation((0, [2]), PauliX, 2), + /// ControlledRotation((1, [0]), PauliX, 3), + /// ControlledRotation((2, [1]), PauliX, 4) + /// ]; + /// ``` function CombinedStructure(layers : ControlledRotation[][]) : ControlledRotation[] { mutable combined = Head(layers); mutable offset = Length(combined); From d9fccf28975d6007fafd0d67a73a43e0b2712f51 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 12:17:46 -0800 Subject: [PATCH 25/43] =?UTF-8?q?Training.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/Training.qs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index 38ef2f313b1..6792935d3c0 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -51,10 +51,8 @@ namespace Microsoft.Quantum.MachineLearning { /// on a given labeled training set. /// /// # Input - /// ## structure - /// Structure of the sequential classifier to be trained. - /// ## parameterSource - /// A list of parameter vectors to use as starting points during training. + /// ## models + /// An array of models to be used as starting points during training. /// ## samples /// A set of labeled training data that will be used to perform training. /// ## options @@ -197,10 +195,8 @@ namespace Microsoft.Quantum.MachineLearning { /// For best accuracy, set to 1. /// ## options /// Options to be used in training. - /// ## structure - /// The structure of the sequential classifier to be trained. /// ## model - /// The parameterization and bias of the sequential model to be trained. + /// The sequential model to be trained. /// ## nPreviousBestMisses /// The best number of misclassifications observed in previous epochs. /// From 4bebaec5509dae1a1a968a7c918bbcc09c50177a Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Fri, 31 Jan 2020 12:24:21 -0800 Subject: [PATCH 26/43] =?UTF-8?q?Validation.qs=20=E2=98=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- MachineLearning/src/Validation.qs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/MachineLearning/src/Validation.qs b/MachineLearning/src/Validation.qs index afb7896e577..83ebe87d17d 100644 --- a/MachineLearning/src/Validation.qs +++ b/MachineLearning/src/Validation.qs @@ -54,6 +54,25 @@ namespace Microsoft.Quantum.MachineLearning { return Length(Misclassifications(proposed, actual)); } + /// # Summary + /// Validates a given sequential classifier against a given set of + /// pre-labeled samples. + /// + /// # Input + /// ## model + /// The sequential model to be validated. + /// ## samples + /// The samples to be used to validate the given model. + /// ## tolerance + /// The approximation tolerance to use in encoding each sample as an input + /// to the sequential classifier. + /// ## nMeasurements + /// The number of measurements to use in classifying each sample. + /// ## validationSchedule + /// The schedule by which samples should be drawn from the validation set. + /// + /// # Ouput + /// The results of the given validation. 
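[Editor's note, not part of the patch.] A hypothetical end-to-end validation call for reference. The tolerance, shot count, and schedule are assumptions, the sketch assumes the operation returns the `ValidationResults` UDT described earlier, and `model` and `samples` are presumed to come from an earlier training run.

```Q#
namespace Example {
    open Microsoft.Quantum.MachineLearning;

    operation CountValidationMisses(
        model : SequentialModel,
        samples : LabeledSample[]
    ) : Int {
        let results = ValidateSequentialClassifier(
            model,
            samples,
            0.001,                                       // encoding tolerance (assumed)
            10000,                                       // measurements per sample (assumed)
            SamplingSchedule([0..Length(samples) - 1])   // validate on every sample
        );
        return results::NMisclassifications;
    }
}
```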
operation ValidateSequentialClassifier( model : SequentialModel, samples : LabeledSample[], From c66bd7d21832ea17545ed239aba359b9565891ac Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Mon, 3 Feb 2020 09:29:45 -0800 Subject: [PATCH 27/43] Fix build properties to use latest QDK version. --- Build/props/tests.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Build/props/tests.props b/Build/props/tests.props index 3a231bc74cc..f8425ad56b1 100644 --- a/Build/props/tests.props +++ b/Build/props/tests.props @@ -6,7 +6,7 @@ - + From e61441a29b831ca449f184e003575b9cb52ad706 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Mon, 3 Feb 2020 10:20:59 -0800 Subject: [PATCH 28/43] Update MachineLearning/src/Types.qs Co-Authored-By: bettinaheim <34236215+bettinaheim@users.noreply.github.com> --- MachineLearning/src/Types.qs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 23d862ca3b1..c86d223df5c 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -71,7 +71,7 @@ namespace Microsoft.Quantum.MachineLearning { /// /// # Input /// ## NQubits - /// The nubmer of qubits on which the encoded input is defined. + /// The number of qubits on which the encoded input is defined. /// ## Prepare /// An operation which prepares the encoded input on a little-endian /// register of `NQubits` qubits. From 89511e05b7d98a8a70d0cac8a9b0dd6abc085b5a Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Mon, 3 Feb 2020 10:39:49 -0800 Subject: [PATCH 29/43] Addressing feedback from @bettinaheim. --- MachineLearning/src/Structure.qs | 3 +- MachineLearning/src/Training.qs | 6 ++- .../src/Canon/Utils/ControlledOnBitString.qs | 43 ++++++++++--------- 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 53d5d0b0d46..7b8f191a3c8 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -109,7 +109,7 @@ namespace Microsoft.Quantum.MachineLearning { /// # Summary - /// Returns an array of uncontrolled (single-qubit) rotations along a given + /// Returns an array of single-qubit rotations along a given /// axis, parameterized by distinct model parameters. /// /// # Input @@ -122,7 +122,6 @@ namespace Microsoft.Quantum.MachineLearning { /// An array of controlled rotations about the given axis, one on each of /// `nQubits` qubits. function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : ControlledRotation[] { - // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] return Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index 6792935d3c0..a95e4b21ef1 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -170,9 +170,13 @@ namespace Microsoft.Quantum.MachineLearning { } } - // TODO:REVIEW: Ok to interpret utility as size of the overall move? return ( + // NB: Here, we define the utility of an optimization step as the + // size of the step taken during the move. We can find this size + // as the squared norm of the gradient. SquaredNorm(batchGradient), + // To actually apply the step, we can use Mapped(PlusD, Zip(...)) + // to represent element-wise vector summation. 
model w/ Parameters <- Mapped(PlusD, Zip(model::Parameters, batchGradient)) ); diff --git a/Standard/src/Canon/Utils/ControlledOnBitString.qs b/Standard/src/Canon/Utils/ControlledOnBitString.qs index 777085e20c1..e5c50f26704 100644 --- a/Standard/src/Canon/Utils/ControlledOnBitString.qs +++ b/Standard/src/Canon/Utils/ControlledOnBitString.qs @@ -4,7 +4,7 @@ namespace Microsoft.Quantum.Canon { open Microsoft.Quantum.Convert; open Microsoft.Quantum.Intrinsic; - + /// # Summary /// Applies a unitary operator on the target register if the control register state corresponds to a specified bit mask. /// @@ -27,39 +27,41 @@ namespace Microsoft.Quantum.Canon { { ApplyWithCA(ApplyPauliFromBitString(PauliX, false, bits, _), Controlled oracle(_, targetRegister), controlRegister); } - + adjoint invert; controlled distribute; controlled adjoint distribute; } - - + + /// # Summary - /// Returns a unitary operator that applies an oracle on the target register if the control register state corresponds to a specified bit mask. + /// Returns a unitary operation that applies an oracle on the target register if the control register state corresponds to a specified bit mask. /// /// # Description - /// The output of this function can be represented by a unitary transformation $U$ such that + /// The output of this function is an operation that can be represented by a + /// unitary transformation $U$ such that /// \begin{align} - /// U \ket{b_0 b_1 \cdots b_{n - 1}} \ket{\psi} = \ket{b_0 b_1 \cdots b_{n-1}} \otimes + /// U \ket{b_0 b_1 \cdots b_{n - 1}} \ket{\psi} = \ket{b_0 b_1 \cdots b_{n-1}} \otimes /// \begin{cases} /// V \ket{\psi} & \textrm{if} (b_0 b_1 \cdots b_{n - 1}) = \texttt{bits} \\\\ /// \ket{\psi} & \textrm{otherwise} /// \end{cases}, /// \end{align} - /// where $V$ is a unitary transformation that represents the action of the `oracle` operation. + /// where $V$ is a unitary transformation that represents the action of the + /// `oracle` operation. /// /// # Input /// ## bits - /// The bit string to control the given unitary operator on. + /// The bit string to control the given unitary operation on. /// ## oracle - /// Unitary operator to be applied on the target register. + /// The unitary operation to be applied on the target register. /// /// # Output - /// A unitary operator that applies `oracle` on the target register if the control register state corresponds to the bit mask `bits`. + /// A unitary operation that applies `oracle` on the target register if the control register state corresponds to the bit mask `bits`. /// /// # Remarks /// The length of `bits` and `controlRegister` must be equal. - /// + /// /// Given a Boolean array `bits` and a unitary operation `oracle`, the output of this function /// is an operation that performs the following steps: /// * apply an `X` operation to each qubit of the control register that corresponds to `false` element of the `bits`; @@ -89,12 +91,12 @@ namespace Microsoft.Quantum.Canon { /// (ControlledOnBitString([false], Z))(register[0..0], register[1]); /// } /// ``` - function ControlledOnBitString<'T> (bits : Bool[], oracle : ('T => Unit is Adj + Ctl)) : ((Qubit[], 'T) => Unit is Adj + Ctl) - { + function ControlledOnBitString<'T> (bits : Bool[], oracle : ('T => Unit is Adj + Ctl)) + : ((Qubit[], 'T) => Unit is Adj + Ctl) { return ControlledOnBitStringImpl(bits, oracle, _, _); } - - + + /// # Summary /// Applies a unitary operator on the target register if the control register state corresponds to a specified positive integer. 
/// @@ -118,13 +120,13 @@ namespace Microsoft.Quantum.Canon { let bits = IntAsBoolArray(numberState, Length(controlRegister)); (ControlledOnBitString(bits, oracle))(controlRegister, targetRegister); } - + adjoint invert; controlled distribute; controlled adjoint distribute; } - - + + /// # Summary /// Returns a unitary operator that applies an oracle on the target register if the control register state corresponds to a specified positive integer. /// @@ -140,7 +142,6 @@ namespace Microsoft.Quantum.Canon { { return ControlledOnIntImpl(numberState, oracle, _, _); } - -} +} From 7997c9a9ffe9325586585e089114725b97d1933a Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 11:34:10 -0800 Subject: [PATCH 30/43] Began adding more tests. --- MachineLearning/tests/TypesTests.qs | 20 +++++++++++++++++++ MachineLearning/tests/ValidationTests.qs | 19 ++++++++++++++++++ Standard/src/Diagnostics/Facts.qs | 25 ++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 MachineLearning/tests/TypesTests.qs create mode 100644 MachineLearning/tests/ValidationTests.qs diff --git a/MachineLearning/tests/TypesTests.qs b/MachineLearning/tests/TypesTests.qs new file mode 100644 index 00000000000..4a70e80f30e --- /dev/null +++ b/MachineLearning/tests/TypesTests.qs @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning.Tests { + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.MachineLearning as ML; + + @Test("QuantumSimulator") + function ScheduleLengthFact() : Unit { + let actualLength = ML.ScheduleLength(ML.SamplingSchedule([0..4, 1..2..5])); + EqualityFactI(actualLength, 5 + 3, "Wrong output from ScheduleLength."); + } + + @Test("QuantumSimulator") + function SampledFact() : Unit { + let actuallySampled = ML.Sampled(ML.SamplingSchedule([0..4, 1..2..5]), [0, 10, 20, 30, 40, 50, 60, 70]); + AllEqualityFactI(actuallySampled, [0, 10, 20, 30, 40, 10, 30, 50], "Wrong output from Sampled."); + } + +} diff --git a/MachineLearning/tests/ValidationTests.qs b/MachineLearning/tests/ValidationTests.qs new file mode 100644 index 00000000000..9fc54cf74aa --- /dev/null +++ b/MachineLearning/tests/ValidationTests.qs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning.Tests { + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.MachineLearning as ML; + + @Test("QuantumSimulator") + function MisclassificationsFact() : Unit { + let misclassifications = ML.Misclassifications([0, 1, 0, 0], [0, 1, 1, 0]); + AllEqualityFactI(misclassifications, [2], "Wrong output from Misclassifications."); + } + + @Test("QuantumSimulator") + function NMisclassificationsFact() : Unit { + let nMisclassifications = ML.NMisclassifications([0, 1, 0, 0, 1], [0, 1, 1, 0, 0]); + EqualityFactI(nMisclassifications, 2, "Wrong output from NMisclassifications."); + } +} diff --git a/Standard/src/Diagnostics/Facts.qs b/Standard/src/Diagnostics/Facts.qs index 33224a98d85..eaa4622c58a 100644 --- a/Standard/src/Diagnostics/Facts.qs +++ b/Standard/src/Diagnostics/Facts.qs @@ -200,6 +200,9 @@ namespace Microsoft.Quantum.Diagnostics { /// The array that is expected from a test case of interest. /// ## message /// A message to be printed if the arrays are not equal. 
+ /// + /// # See Also + /// Microsoft.Quantum.Diagnostics.AllEqualityFactI function AllEqualityFactB(actual : Bool[], expected : Bool[], message : String) : Unit { let n = Length(actual); if (n != Length(expected)) { @@ -209,4 +212,26 @@ namespace Microsoft.Quantum.Diagnostics { Ignore(Mapped(EqualityFactB(_, _, message), Zip(actual, expected))); } + /// # Summary + /// Asserts that two arrays of integer values are equal. + /// + /// # Input + /// ## actual + /// The array that is produced by a test case of interest. + /// ## expected + /// The array that is expected from a test case of interest. + /// ## message + /// A message to be printed if the arrays are not equal. + /// + /// # See Also + /// Microsoft.Quantum.Diagnostics.AllEqualityFactB + function AllEqualityFactI(actual : Int[], expected : Int[], message : String) : Unit { + let n = Length(actual); + if (n != Length(expected)) { + fail message; + } + + Ignore(Mapped(EqualityFactI(_, _, message), Zip(actual, expected))); + } + } From 2076e8c10be6fd44615e8832f33b05da637a62c7 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 12:54:18 -0800 Subject: [PATCH 31/43] New test for applysequentialclassifier. --- MachineLearning/tests/StructureTests.qs | 52 +++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 MachineLearning/tests/StructureTests.qs diff --git a/MachineLearning/tests/StructureTests.qs b/MachineLearning/tests/StructureTests.qs new file mode 100644 index 00000000000..6ffeafc2ed5 --- /dev/null +++ b/MachineLearning/tests/StructureTests.qs @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning.Tests { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.MachineLearning as ML; + + @Test("QuantumSimulator") + function NQubitsRequiredFact() : Unit { + let model = Default() + w/ Structure <- [ + ML.ControlledRotation((3, [7, 9]), PauliX, 0) + ]; + let actual = ML.NQubitsRequired(model); + EqualityFactI(actual, 10, "Wrong output from ScheduleLength."); + } + + function ExampleModel() : ML.SequentialModel { + return Default() + w/ Structure <- [ + Default() + w/ TargetIndex <- 2 + w/ ControlIndices <- [0] + w/ Axis <- PauliX + w/ ParameterIndex <- 0, + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- [1, 2] + w/ Axis <- PauliZ + w/ ParameterIndex <- 1 + ] + w/ Parameters <- [ + 1.234, + 2.345 + ]; + } + + operation ApplyExampleModelManually(register : Qubit[]) : Unit is Adj + Ctl { + Controlled R([register[0]], (PauliX, 1.234, register[2])); + Controlled R([register[1], register[2]], (PauliZ, 2.345, register[0])); + } + + @Test("QuantumSimulator") + operation TestApplySequentialClassifier() : Unit { + AssertOperationsEqualReferenced(ML.NQubitsRequired(ExampleModel()), + ML.ApplySequentialClassifier(ExampleModel(), _), + ApplyExampleModelManually + ); + } + +} From cfb10d3ebe3becc15bdea26af38dcc81712baa6d Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 13:54:47 -0800 Subject: [PATCH 32/43] Reduce warning count. 
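[Editor's note, not part of the patch.] The hunk below swaps the `CascadeCNOT` helper for `ApplyCNOTChain` and folds the manual compute/uncompute sequencing into `within ... apply` blocks. As a reminder of how that pattern behaves, a generic sketch follows; the namespace, operation name, and gates are assumptions and not taken from the patch.

```Q#
namespace Example {
    open Microsoft.Quantum.Arrays;
    open Microsoft.Quantum.Canon;
    open Microsoft.Quantum.Intrinsic;

    // `within { U(); } apply { V(); }` runs U, then V, then `Adjoint U` automatically,
    // so the uncompute step no longer has to be written (or maintained) by hand.
    operation ApplyConjugatedPhase(register : Qubit[]) : Unit is Adj + Ctl {
        within {
            ApplyToEachCA(X, register);                    // compute
        } apply {
            Controlled Z(Most(register), Tail(register));  // action
        }                                                  // X layer undone here
    }
}
```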
--- Standard/src/Arithmetic/Integer.qs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/Standard/src/Arithmetic/Integer.qs b/Standard/src/Arithmetic/Integer.qs index 07c9d9a306a..df9209fc4f6 100644 --- a/Standard/src/Arithmetic/Integer.qs +++ b/Standard/src/Arithmetic/Integer.qs @@ -583,16 +583,18 @@ namespace Microsoft.Quantum.Arithmetic { X(ys![0]); } else { - ApplyToEachCA(X, ys!); - ApplyToEachCA(CNOT, Zip(Rest(xs!),Rest(ys!))); - (Adjoint CascadeCNOT) (Rest(xs!)); - CascadeCCNOT (Most(ys!), xs!); - (Controlled CCNOT) (controls, (xs![nQubits-1], ys![nQubits-1], result)); - (Adjoint CascadeCCNOT) (Most(ys!), xs!); - CascadeCNOT(Rest(xs!)); - (Controlled CNOT) (controls, (xs![nQubits-1], result)); - ApplyToEachCA(CNOT, Zip(Rest(xs!), Rest(ys!))); - ApplyToEachCA(X, ys!); + within { + ApplyToEachCA(X, ys!); + ApplyToEachCA(CNOT, Zip(Rest(xs!), Rest(ys!))); + } apply { + within { + (Adjoint ApplyCNOTChain) (Rest(xs!)); + CascadeCCNOT (Most(ys!), xs!); + } apply { + (Controlled CCNOT) (controls, (xs![nQubits-1], ys![nQubits-1], result)); + } + (Controlled CNOT) (controls, (xs![nQubits-1], result)); + } } } } From 1c85c75992ff06fad4ab4e895a975ba2d0aeca2c Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 14:53:43 -0800 Subject: [PATCH 33/43] More tests of structure functions. --- MachineLearning/src/Structure.qs | 3 +- MachineLearning/tests/StructureTests.qs | 136 +++++++++++++++++++++++- 2 files changed, 137 insertions(+), 2 deletions(-) diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 7b8f191a3c8..c1c8a0810f6 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -22,7 +22,7 @@ namespace Microsoft.Quantum.MachineLearning { : Int { mutable nQubitsRequired = 0; for (gate in model::Structure) { - set nQubitsRequired = Fold( + set nQubitsRequired = 1 + Fold( MaxI, 0, gate::ControlIndices + [ gate::TargetIndex, @@ -51,6 +51,7 @@ namespace Microsoft.Quantum.MachineLearning { : (Unit) is Adj + Ctl { for (gate in model::Structure) { if (gate::ParameterIndex < Length(model::Parameters)) { + Message($"axis {gate::Axis} parameter {model::Parameters}[{gate::ParameterIndex}] target {gate::TargetIndex}"); let input = (gate::Axis, model::Parameters[gate::ParameterIndex], qubits[gate::TargetIndex]); if (IsEmpty(gate::ControlIndices)) { // Uncontrolled rotation of target diff --git a/MachineLearning/tests/StructureTests.qs b/MachineLearning/tests/StructureTests.qs index 6ffeafc2ed5..76da542f15f 100644 --- a/MachineLearning/tests/StructureTests.qs +++ b/MachineLearning/tests/StructureTests.qs @@ -2,6 +2,8 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.MachineLearning.Tests { + open Microsoft.Quantum.Logical; + open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Diagnostics; open Microsoft.Quantum.MachineLearning as ML; @@ -13,7 +15,7 @@ namespace Microsoft.Quantum.MachineLearning.Tests { ML.ControlledRotation((3, [7, 9]), PauliX, 0) ]; let actual = ML.NQubitsRequired(model); - EqualityFactI(actual, 10, "Wrong output from ScheduleLength."); + EqualityFactI(actual, 10, "Wrong output from NQubitsRequired."); } function ExampleModel() : ML.SequentialModel { @@ -49,4 +51,136 @@ namespace Microsoft.Quantum.MachineLearning.Tests { ); } + function EqualCR(x : ML.ControlledRotation, y : ML.ControlledRotation) : Bool { + return x::Axis == y::Axis and + All(EqualI, Zip(x::ControlIndices, y::ControlIndices)) and + x::TargetIndex == y::TargetIndex and + x::ParameterIndex == y::ParameterIndex; + } + + @Test("QuantumSimulator") + function LocalRotationsLayerFact() : Unit { + Fact(All(EqualCR, Zip( + ML.LocalRotationsLayer(3, PauliY), + [ + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- new Int[0] + w/ Axis <- PauliY + w/ ParameterIndex <- 0, + + Default() + w/ TargetIndex <- 1 + w/ ControlIndices <- new Int[0] + w/ Axis <- PauliY + w/ ParameterIndex <- 1, + + Default() + w/ TargetIndex <- 2 + w/ ControlIndices <- new Int[0] + w/ Axis <- PauliY + w/ ParameterIndex <- 2 + ] + )), "LocalRotationsLayer returned wrong output."); + } + + @Test("QuantumSimulator") + function PartialRotationsLayerFact() : Unit { + Fact(All(EqualCR, Zip( + ML.PartialRotationsLayer([4, 5, 6], PauliY), + [ + Default() + w/ TargetIndex <- 4 + w/ ControlIndices <- new Int[0] + w/ Axis <- PauliY + w/ ParameterIndex <- 0, + + Default() + w/ TargetIndex <- 5 + w/ ControlIndices <- new Int[0] + w/ Axis <- PauliY + w/ ParameterIndex <- 1, + + Default() + w/ TargetIndex <- 6 + w/ ControlIndices <- new Int[0] + w/ Axis <- PauliY + w/ ParameterIndex <- 2 + ] + )), "PartialRotationsLayer returned wrong output."); + } + + @Test("QuantumSimulator") + function CyclicEntanglingLayerFact() : Unit { + Fact(All(EqualCR, Zip( + ML.CyclicEntanglingLayer(3, PauliX, 2), + [ + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- [2] + w/ Axis <- PauliX + w/ ParameterIndex <- 0, + + Default() + w/ TargetIndex <- 1 + w/ ControlIndices <- [0] + w/ Axis <- PauliX + w/ ParameterIndex <- 1, + + Default() + w/ TargetIndex <- 2 + w/ ControlIndices <- [1] + w/ Axis <- PauliX + w/ ParameterIndex <- 2 + ] + )), "CyclicEntanglingLayer returned wrong output."); + } + + @Test("QuantumSimulator") + function CombinedStructureFact() : Unit { + Fact(All(EqualCR, Zip( + ML.CombinedStructure([ + [ + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- [2] + w/ Axis <- PauliX + w/ ParameterIndex <- 0, + + Default() + w/ TargetIndex <- 1 + w/ ControlIndices <- [0] + w/ Axis <- PauliX + w/ ParameterIndex <- 1, + ], + [ + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- [2] + w/ Axis <- PauliZ + w/ ParameterIndex <- 0, + ] + ]), + [ + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- [2] + w/ Axis <- PauliX + w/ ParameterIndex <- 0, + + Default() + w/ TargetIndex <- 1 + w/ ControlIndices <- [0] + w/ Axis <- PauliX + w/ ParameterIndex <- 1, + + Default() + w/ TargetIndex <- 2 + w/ ControlIndices <- [1] + w/ Axis <- PauliZ + w/ ParameterIndex <- 2 + ] + )), "CombinedStructure returned wrong output."); + } + } From fd0b40ae42a3f4cbe19a72bb5a86d18e5d6a80ec Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 15:08:03 -0800 
Subject: [PATCH 34/43] Fixed test and eliminated some warnings. --- MachineLearning/tests/StructureTests.qs | 45 +++++++++++++------------ Standard/src/Arithmetic/Comparators.qs | 18 ++++------ Standard/src/Arithmetic/Integer.qs | 2 +- 3 files changed, 31 insertions(+), 34 deletions(-) diff --git a/MachineLearning/tests/StructureTests.qs b/MachineLearning/tests/StructureTests.qs index 76da542f15f..49ee4503189 100644 --- a/MachineLearning/tests/StructureTests.qs +++ b/MachineLearning/tests/StructureTests.qs @@ -138,29 +138,30 @@ namespace Microsoft.Quantum.MachineLearning.Tests { @Test("QuantumSimulator") function CombinedStructureFact() : Unit { + let combined = ML.CombinedStructure([ + [ + Default() + w/ TargetIndex <- 0 + w/ ControlIndices <- [2] + w/ Axis <- PauliX + w/ ParameterIndex <- 0, + + Default() + w/ TargetIndex <- 1 + w/ ControlIndices <- [0] + w/ Axis <- PauliX + w/ ParameterIndex <- 1, + ], + [ + Default() + w/ TargetIndex <- 2 + w/ ControlIndices <- [1] + w/ Axis <- PauliZ + w/ ParameterIndex <- 0, + ] + ]); Fact(All(EqualCR, Zip( - ML.CombinedStructure([ - [ - Default() - w/ TargetIndex <- 0 - w/ ControlIndices <- [2] - w/ Axis <- PauliX - w/ ParameterIndex <- 0, - - Default() - w/ TargetIndex <- 1 - w/ ControlIndices <- [0] - w/ Axis <- PauliX - w/ ParameterIndex <- 1, - ], - [ - Default() - w/ TargetIndex <- 0 - w/ ControlIndices <- [2] - w/ Axis <- PauliZ - w/ ParameterIndex <- 0, - ] - ]), + combined, [ Default() w/ TargetIndex <- 0 diff --git a/Standard/src/Arithmetic/Comparators.qs b/Standard/src/Arithmetic/Comparators.qs index c6898baa6f6..217ab59c479 100644 --- a/Standard/src/Arithmetic/Comparators.qs +++ b/Standard/src/Arithmetic/Comparators.qs @@ -54,19 +54,15 @@ namespace Microsoft.Quantum.Arithmetic { } // Implementation step of `ApplyRippleCarryComparatorLE`. - operation _ApplyRippleCarryComparatorLE(x: LittleEndian, y: LittleEndian, auxiliary: Qubit[], output: Qubit) : Unit { - body (...) { - let nQubitsX = Length(x!); + operation _ApplyRippleCarryComparatorLE(x: LittleEndian, y: LittleEndian, auxiliary: Qubit[], output: Qubit) + : Unit is Adj + Ctl { + let nQubitsX = Length(x!); - // Take 2's complement - ApplyToEachCA(X, x! + auxiliary); + // Take 2's complement + ApplyToEachCA(X, x! + auxiliary); - InPlaceMajority(x![0], [y![0], auxiliary[0]]); - ApplyToEachCA(MAJ, Zip3(Most(x!), Rest(y!), Rest(x!))); - } - adjoint auto; - controlled auto; - adjoint controlled auto; + ApplyMajorityInPlace(x![0], [y![0], auxiliary[0]]); + ApplyToEachCA(MAJ, Zip3(Most(x!), Rest(y!), Rest(x!))); } } diff --git a/Standard/src/Arithmetic/Integer.qs b/Standard/src/Arithmetic/Integer.qs index df9209fc4f6..4c59819a1b2 100644 --- a/Standard/src/Arithmetic/Integer.qs +++ b/Standard/src/Arithmetic/Integer.qs @@ -404,7 +404,7 @@ namespace Microsoft.Quantum.Arithmetic { "Input registers must have the same number of qubits." ); ApplyToEachCA(CNOT, Zip(Rest(xs!), Rest(ys!))); - (Adjoint CascadeCNOT) (Rest(xs!)); + Adjoint ApplyCNOTChain(Rest(xs!)); } /// # Summary From 748d129b422a6466d55b5ccc693a3e7e55af1f4a Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 15:10:39 -0800 Subject: [PATCH 35/43] Further warning elimination. 
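The changes below drop trailing commas from array literals in StructureTests.qs and move Algorithms.qs from DecomposeIntoTimeStepsCA to DecomposedIntoTimeStepsCA (presumably the non-deprecated spelling); the remaining edits are whitespace only. For reference, a small self-contained sketch of the decomposition call pattern used by TrotterStep, with a toy two-term generator standing in for a real EvolutionGenerator (the namespace and operation names are placeholders, and DecomposedIntoTimeStepsCA is assumed to be available from Microsoft.Quantum.Canon):

    namespace TrotterSketch {
        open Microsoft.Quantum.Canon;
        open Microsoft.Quantum.Intrinsic;

        // Term 0 rotates about X, term 1 about Z; the index selects the term.
        operation ApplyToyTerm(idx : Int, stepSize : Double, qs : Qubit[]) : Unit is Adj + Ctl {
            if (idx == 0) {
                Rx(stepSize, qs[0]);
            } else {
                Rz(stepSize, qs[0]);
            }
        }

        // Applies a single second-order Trotter step of size 0.1 to a register.
        operation ApplyToyTrotterStep(qs : Qubit[]) : Unit is Adj + Ctl {
            (DecomposedIntoTimeStepsCA((2, ApplyToyTerm), 2))(0.1, qs);
        }
    }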
--- MachineLearning/tests/StructureTests.qs | 4 +-- Standard/src/Simulation/Algorithms.qs | 40 ++++++++++++------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/MachineLearning/tests/StructureTests.qs b/MachineLearning/tests/StructureTests.qs index 49ee4503189..e9ee25a29ae 100644 --- a/MachineLearning/tests/StructureTests.qs +++ b/MachineLearning/tests/StructureTests.qs @@ -150,14 +150,14 @@ namespace Microsoft.Quantum.MachineLearning.Tests { w/ TargetIndex <- 1 w/ ControlIndices <- [0] w/ Axis <- PauliX - w/ ParameterIndex <- 1, + w/ ParameterIndex <- 1 ], [ Default() w/ TargetIndex <- 2 w/ ControlIndices <- [1] w/ Axis <- PauliZ - w/ ParameterIndex <- 0, + w/ ParameterIndex <- 0 ] ]); Fact(All(EqualCR, Zip( diff --git a/Standard/src/Simulation/Algorithms.qs b/Standard/src/Simulation/Algorithms.qs index 3ecc5d7f19f..ef33f64567b 100644 --- a/Standard/src/Simulation/Algorithms.qs +++ b/Standard/src/Simulation/Algorithms.qs @@ -9,7 +9,7 @@ namespace Microsoft.Quantum.Simulation { // A simulation technique converts an EvolutionGenerator to time evolution // by the encoded system for some time step // Here is an example of a simulation technique. - + /// # Summary /// Implements time-evolution by a term contained in a `GeneratorSystem`. /// @@ -28,8 +28,8 @@ namespace Microsoft.Quantum.Simulation { let generatorIndex = generatorSystemFunction(idx); (evolutionSet!(generatorIndex))!(stepsize, qubits); } - - + + /// # Summary /// Implements a single time-step of time-evolution by the system /// described in an `EvolutionGenerator` using a Trotter–Suzuki @@ -54,19 +54,19 @@ namespace Microsoft.Quantum.Simulation { { let (evolutionSet, generatorSystem) = evolutionGenerator!; let (nTerms, generatorSystemFunction) = generatorSystem!; - + // The input to DecomposeIntoTimeStepsCA has signature // (Int, ((Int, Double, Qubit[]) => () is Adj + Ctl)) let trotterForm = (nTerms, TrotterStepImpl(evolutionGenerator, _, _, _)); - return (DecomposeIntoTimeStepsCA(trotterForm, trotterOrder))(trotterStepSize, _); + return (DecomposedIntoTimeStepsCA(trotterForm, trotterOrder))(trotterStepSize, _); } - - + + // This simulation algorithm takes (timeMax, EvolutionGenerator, // register) and other algorithm-specific parameters (trotterStepSize, // trotterOrder), and performs evolution under the EvolutionGenerator // for time = timeMax. - + /// # Summary /// Makes repeated calls to `TrotterStep` to approximate the /// time-evolution operator exp(_-iHt_). @@ -88,19 +88,19 @@ namespace Microsoft.Quantum.Simulation { { let nTimeSlices = Ceiling(maxTime / trotterStepSize); let resizedTrotterStepSize = maxTime / IntAsDouble(nTimeSlices); - + for (idxTimeSlice in 0 .. nTimeSlices - 1) { (TrotterStep(evolutionGenerator, trotterOrder, resizedTrotterStepSize))(qubits); } } - + adjoint invert; controlled distribute; controlled adjoint distribute; } - - + + /// # Summary /// `SimulationAlgorithm` function that uses a Trotter–Suzuki /// decomposition to approximate the time-evolution operator _exp(-iHt)_. @@ -117,11 +117,11 @@ namespace Microsoft.Quantum.Simulation { { return SimulationAlgorithm(TrotterSimulationAlgorithmImpl(trotterStepSize, trotterOrder, _, _, _)); } - - + + // This simple time-dependent simulation algorithm implements a // sequence of uniformly-sized trotter steps - + /// # Summary /// Implementation of multiple Trotter steps to approximate a unitary /// operator that solves the time-dependent Schrödinger equation. 
@@ -143,7 +143,7 @@ namespace Microsoft.Quantum.Simulation { { let nTimeSlices = Ceiling(maxTime / trotterStepSize); let resizedTrotterStepSize = maxTime / IntAsDouble(nTimeSlices); - + for (idxTimeSlice in 0 .. nTimeSlices - 1) { let schedule = IntAsDouble(idxTimeSlice) / IntAsDouble(nTimeSlices); @@ -153,13 +153,13 @@ namespace Microsoft.Quantum.Simulation { (TrotterSimulationAlgorithm(resizedTrotterStepSize, trotterOrder))!(resizedTrotterStepSize, evolutionGenerator, qubits); } } - + adjoint invert; controlled distribute; controlled adjoint distribute; } - - + + /// # Summary /// `TimeDependentSimulationAlgorithm` function that uses a Trotter–Suzuki /// decomposition to approximate a unitary operator that solves the @@ -177,7 +177,7 @@ namespace Microsoft.Quantum.Simulation { { return TimeDependentSimulationAlgorithm(TimeDependentTrotterSimulationAlgorithmImpl(trotterStepSize, trotterOrder, _, _, _)); } - + } From a387017c95452fb2ca67a29d89c1dd2ae52dd67d Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 15:29:50 -0800 Subject: [PATCH 36/43] Tests for InferredLabel and InferredLabels. --- MachineLearning/tests/ClassificationTests.qs | 25 ++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 MachineLearning/tests/ClassificationTests.qs diff --git a/MachineLearning/tests/ClassificationTests.qs b/MachineLearning/tests/ClassificationTests.qs new file mode 100644 index 00000000000..2b41e0cbc5c --- /dev/null +++ b/MachineLearning/tests/ClassificationTests.qs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.MachineLearning.Tests { + open Microsoft.Quantum.Logical; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.MachineLearning as ML; + + @Test("QuantumSimulator") + function InferredLabelFact() : Unit { + EqualityFactI(ML.InferredLabel(0.25, 0.26), 1, "InferredLabel returned wrong class."); + } + + @Test("QuantumSimulator") + function InferredLabelsFact() : Unit { + AllEqualityFactI( + ML.InferredLabels(0.25, [0.23, 0.26, 0.1]), + [0, 1, 0], + "InferredLabels returned at least wrong class." + ); + } + +} From 1039a0d13a5c717b9df6b12ee9617e4434b7a3b2 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Wed, 5 Feb 2020 15:36:11 -0800 Subject: [PATCH 37/43] Undo printf-style debugging. 
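The Message call removed below was added for ad hoc tracing and fires once per gate inside ApplySequentialClassifier, which clutters the output of every test that exercises the classifier. If per-gate tracing is wanted again, one option is to keep it in test code by wrapping the operation under test, for example (a sketch only; this wrapper is not part of the library):

    namespace TracingSketch {
        open Microsoft.Quantum.Intrinsic;

        // Logs a label before applying an operation to a register.
        operation ApplyWithTrace(label : String, op : (Qubit[] => Unit), register : Qubit[]) : Unit {
            Message($"Applying {label} to {Length(register)} qubit(s).");
            op(register);
        }
    }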
--- MachineLearning/src/Structure.qs | 1 - 1 file changed, 1 deletion(-) diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index c1c8a0810f6..80f31432225 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -51,7 +51,6 @@ namespace Microsoft.Quantum.MachineLearning { : (Unit) is Adj + Ctl { for (gate in model::Structure) { if (gate::ParameterIndex < Length(model::Parameters)) { - Message($"axis {gate::Axis} parameter {model::Parameters}[{gate::ParameterIndex}] target {gate::TargetIndex}"); let input = (gate::Axis, model::Parameters[gate::ParameterIndex], qubits[gate::TargetIndex]); if (IsEmpty(gate::ControlIndices)) { // Uncontrolled rotation of target From d9ff81615f2a2c46d07afc75693a90abf24b23e6 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Tue, 11 Feb 2020 14:05:19 -0800 Subject: [PATCH 38/43] Update MachineLearning/tests/ClassificationTests.qs Co-Authored-By: Alan Geller --- MachineLearning/tests/ClassificationTests.qs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MachineLearning/tests/ClassificationTests.qs b/MachineLearning/tests/ClassificationTests.qs index 2b41e0cbc5c..96e9c4ce309 100644 --- a/MachineLearning/tests/ClassificationTests.qs +++ b/MachineLearning/tests/ClassificationTests.qs @@ -18,7 +18,7 @@ namespace Microsoft.Quantum.MachineLearning.Tests { AllEqualityFactI( ML.InferredLabels(0.25, [0.23, 0.26, 0.1]), [0, 1, 0], - "InferredLabels returned at least wrong class." + "InferredLabels returned at least one wrong class." ); } From 86712bdf7eb6fe3b3b3d2aa72b10a8bb87ee3a26 Mon Sep 17 00:00:00 2001 From: Chris Granade Date: Fri, 21 Feb 2020 10:28:37 -0800 Subject: [PATCH 39/43] Addressing feedback from @bettinaheim. (#233) --- Chemistry/src/DataModel/DataModel.csproj | 2 +- MachineLearning/src/Datasets/IrisDataset.qs | 161 +++++++++--------- .../src/Datasets/Properties/NamespaceInfo.qs | 3 +- MachineLearning/src/MachineLearning.csproj | 3 +- MachineLearning/src/Private.qs | 2 +- .../src/Properties/NamespaceInfo.qs | 3 +- MachineLearning/src/Structure.qs | 3 +- MachineLearning/src/Training.qs | 11 +- MachineLearning/src/Types.qs | 14 +- MachineLearning/src/Validation.qs | 3 +- Numerics/src/Numerics.csproj | 2 +- Standard/src/Arithmetic/Reflections.qs | 2 +- Standard/src/Canon/Utils/Multiplexer.qs | 3 +- Standard/src/Standard.csproj | 2 +- 14 files changed, 113 insertions(+), 101 deletions(-) diff --git a/Chemistry/src/DataModel/DataModel.csproj b/Chemistry/src/DataModel/DataModel.csproj index 5f7d7dba3ea..39485766909 100644 --- a/Chemistry/src/DataModel/DataModel.csproj +++ b/Chemistry/src/DataModel/DataModel.csproj @@ -14,7 +14,7 @@ © Microsoft Corporation. All rights reserved. 
Microsoft.Quantum.Chemistry See: https://docs.microsoft.com/en-us/quantum/relnotes/ - https://github.com/Microsoft/QuantumLibraries/raw/master/LICENSE.txt + MIT https://github.com/Microsoft/QuantumLibraries/tree/master/Chemistry https://secure.gravatar.com/avatar/bd1f02955b2853ba0a3b1cdc2434e8ec.png Quantum Q# Qsharp diff --git a/MachineLearning/src/Datasets/IrisDataset.qs b/MachineLearning/src/Datasets/IrisDataset.qs index 0f7cc0101db..0fefe4bee3f 100644 --- a/MachineLearning/src/Datasets/IrisDataset.qs +++ b/MachineLearning/src/Datasets/IrisDataset.qs @@ -5,86 +5,87 @@ namespace Microsoft.Quantum.MachineLearning.Datasets { open Microsoft.Quantum.MachineLearning; operation IrisTrainingData() : LabeledSample[] { - return [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), - LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], - 1)), LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], - 0)), LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], - 0)), LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], - 0)), LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], - 0)), LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], - 1)), LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], - 0)), LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], - 1)), LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], - 1)), LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], - 0)), LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], - 0)), LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], - 0)), LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], - 0)), LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], - 0)), LabeledSample(([0.584546, 0.562276, 0.439516, 0.385976], - 1)), LabeledSample(([0.608485, 0.577022, 0.427781, 0.337336], - 1)), LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], - 1)), LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], - 1)), LabeledSample(([0.512997, 0.525043, 0.460839, 0.49879], - 0)), LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], - 0)), LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], - 0)), LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], - 1)), LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], - 1)), LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], - 0)), LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], - 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], - 0)), LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], - 1)), LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], - 0)), LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], - 0)), LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], - 0)), LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], - 1)), LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], - 1)), LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], - 1)), LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], - 1)), LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], - 1)), LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], - 1)), LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], - 0)), LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], - 0)), LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], - 0)), LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], - 0)), LabeledSample(([0.549086, 0.561405, 0.474075, 0.398223], - 1)), LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], - 0)), LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], - 0)), 
LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], - 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], - 0)), LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], - 1)), LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], - 0)), LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], - 0)), LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], - 0)), LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], - 0)), LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], - 0)), LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], - 1)), LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], - 1)), LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], - 1)), LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], - 1)), LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], - 1)), LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], - 1)), LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], - 1)), LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], - 1)), LabeledSample(([0.552402, 0.574207, 0.444699, 0.409123], - 1)), LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], - 1)), LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], - 0)), LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], - 1)), LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], - 1)), LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], - 0)), LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], - 0)), LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], - 0)), LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], - 1)), LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], - 1)), LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], - 1)), LabeledSample(([0.510624, 0.60155, 0.43847, 0.430285], - 1)), LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], - 1)), LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], - 1)), LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], - 0)), LabeledSample(([0.530036, 0.577194, 0.452731, 0.425375], - 1)), LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], - 1)), LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], - 1)), LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], - 0)), LabeledSample(([0.552072, 0.545641, 0.487013, 0.400388], 1)) + return [ + LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), + LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], 1)), + LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], 0)), + LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], 0)), + LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], 0)), + LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], 0)), + LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], 1)), + LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], 0)), + LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], 1)), + LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], 1)), + LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], 0)), + LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], 0)), + LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], 0)), + LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], 0)), + LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], 0)), + LabeledSample(([0.584546, 0.562276, 0.439516, 0.385976], 1)), + LabeledSample(([0.608485, 0.577022, 0.427781, 0.337336], 1)), + LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], 1)), + LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], 1)), + LabeledSample(([0.512997, 0.525043, 
0.460839, 0.49879], 0)), + LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], 0)), + LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], 0)), + LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], 1)), + LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], 1)), + LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], 0)), + LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], 0)), + LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], 0)), + LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], 1)), + LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], 0)), + LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], 0)), + LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], 0)), + LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], 1)), + LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], 1)), + LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], 1)), + LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], 1)), + LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], 1)), + LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], 1)), + LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], 0)), + LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], 0)), + LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], 0)), + LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], 0)), + LabeledSample(([0.549086, 0.561405, 0.474075, 0.398223], 1)), + LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], 0)), + LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], 0)), + LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], 0)), + LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], 0)), + LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], 1)), + LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], 0)), + LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], 0)), + LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], 0)), + LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], 0)), + LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], 0)), + LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], 1)), + LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], 1)), + LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], 1)), + LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], 1)), + LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], 1)), + LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], 1)), + LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], 1)), + LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], 1)), + LabeledSample(([0.552402, 0.574207, 0.444699, 0.409123], 1)), + LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], 1)), + LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], 0)), + LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], 1)), + LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], 1)), + LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], 0)), + LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], 0)), + LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], 0)), + LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], 1)), + LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], 1)), + LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], 1)), + LabeledSample(([0.510624, 0.60155, 0.43847, 0.430285], 1)), + LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], 1)), + LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], 1)), + LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], 0)), + LabeledSample(([0.530036, 
0.577194, 0.452731, 0.425375], 1)), + LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], 1)), + LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], 1)), + LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], 0)), + LabeledSample(([0.552072, 0.545641, 0.487013, 0.400388], 1)) ]; } diff --git a/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs b/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs index b1559562869..1c4b5dc3258 100644 --- a/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs +++ b/MachineLearning/src/Datasets/Properties/NamespaceInfo.qs @@ -2,5 +2,6 @@ // Licensed under the MIT License. /// # Summary -/// TODO +/// This namespace provides example datasets for use +/// with quantum machine learning classifiers. namespace Microsoft.Quantum.MachineLearning.Datasets { } diff --git a/MachineLearning/src/MachineLearning.csproj b/MachineLearning/src/MachineLearning.csproj index d9204dafa29..881c3d8b5e2 100644 --- a/MachineLearning/src/MachineLearning.csproj +++ b/MachineLearning/src/MachineLearning.csproj @@ -1,7 +1,6 @@ netstandard2.1 - x64 Microsoft.Quantum.MachineLearning true