diff --git a/.gitignore b/.gitignore
index 654e29215f..a88e947836 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@
*.user
*.userosscache
*.sln.docstates
+*.DS_Store
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
@@ -37,6 +38,9 @@ msbuild.binlog
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
+# VSCode files
+.vscode/
+
# Visual Studio 2017 auto generated files
Generated\ Files/
diff --git a/Directory.Build.props b/Directory.Build.props
index dbe8844455..28e1f6527d 100644
--- a/Directory.Build.props
+++ b/Directory.Build.props
@@ -23,8 +23,8 @@
Debug
Debug;Release;Debug-netcoreapp3_1;Release-netcoreapp3_1;Debug-netfx;Release-netfx
AnyCPU
- x64
- $(TargetArchitecture)
+ $([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture)
+ $(TargetArchitecture.ToLower())
$(Platform).$(Configuration)
Open
diff --git a/Directory.Build.targets b/Directory.Build.targets
index bbb9d56b9b..dfeedf59e1 100644
--- a/Directory.Build.targets
+++ b/Directory.Build.targets
@@ -8,27 +8,33 @@
- lib
- .dll
- .so
- .dylib
+ lib
+ .dll
+ .so
+ .dylib
- x64
- $(TargetArchitecture)
- $([MSBuild]::NormalizeDirectory('$(RepoRoot)', 'artifacts', 'bin'))
- $(BinDir)Native\$(NativeTargetArchitecture).$(Configuration)\
+ $([MSBuild]::NormalizeDirectory('$(RepoRoot)', 'artifacts', 'bin'))
+ $(BinDir)Native\$(TargetArchitecture).$(Configuration)\
- AnyCPU
- $(Platform).$(Configuration)
- $(BinDir)$(MSBuildProjectName)\Debug
-
+ AnyCPU
+ $(Platform).$(Configuration)
+ $(BinDir)$(MSBuildProjectName)\Debug
+
-
- $(NativeOutputPath)$(LibPrefix)%(NativeAssemblyReference.Identity)$(LibExtension)
-
+
+ $(NativeOutputPath)$(LibPrefix)%(NativeAssemblyReference.Identity)$(LibExtension)
+
+
+
+
+
+
+
+
+
--rootfsdir
+export ROOTFS_DIR=
+
+# The cross compiling environment is now setup and you can proceed with a normal build
+./build.sh -c Release-netcoreapp3_1 /p:TargetArchitecture=<arch>
+```
+
+Note that the `<arch>` will usually be arm or arm64 and the `<codename>` is bionic for Ubuntu 18.04.
+
### macOS
macOS 10.13 (High Sierra) or higher is needed to build dotnet/machinelearning. We are using a .NET Core 3.1 SDK to build, which supports 10.13 or higher.
diff --git a/docs/building/windows-instructions.md b/docs/building/windows-instructions.md
index 18a463fb49..40eb762812 100644
--- a/docs/building/windows-instructions.md
+++ b/docs/building/windows-instructions.md
@@ -9,7 +9,7 @@ You can build ML.NET either via the command line or by using Visual Studio.
2. **[CMake](https://cmake.org/)** must be installed from [the CMake download page](https://cmake.org/download/#latest) and added to your path.
### Visual Studio 2019 Installation
-We have successfully verified the below build instructions for Visual Studio version 16.4 and higher.
+We have successfully verified the below build instructions for Visual Studio version 16.4 and higher.
#### Visual Studio 2019 - 'Workloads' based install
@@ -37,6 +37,12 @@ The following are the minimum requirements:
* .NET Framework 4.6 Targeting Pack
* Windows Universal CRT SDK
+#### Visual Studio 2019 - Cross compilation for ARM
+
+If you want to cross-compile for ARM you will also need the following from the 'Individual components' section:
+ * MSVC v142 - VS 2019 C++ ARM build tools
+ * MSVC v142 - VS 2019 C++ ARM64 build tools
+
## Building Instructions
In order to fetch dependencies which come through Git submodules the following command needs to be run before building: `git submodule update --init`.
@@ -51,7 +57,7 @@ After successfully running the command, the project can be built directly from t
### Building From the Command Line
-You can use the Developer Command Prompt, Powershell or work in any regular cmd. The Developer Command Prompt will have a name like "Developer Command Prompt for VS 2019" or similar in your start menu.
+You can use the Developer Command Prompt, Powershell or work in any regular cmd. The Developer Command Prompt will have a name like "Developer Command Prompt for VS 2019" or similar in your start menu.
From a (non-admin) Command Prompt window:
@@ -61,6 +67,20 @@ From a (non-admin) Command Prompt window:
**Note**: Before working on individual projects or test projects you **must** run `build.cmd` from the root once before beginning that work. It is also a good idea to run `build.cmd` whenever you pull a large set of unknown changes into your branch.
+### Cross compiling for ARM
+
+You can use the Developer Command Prompt, Powershell or work in any regular cmd. The Developer Command Prompt will have a name like "Developer Command Prompt for VS 2019" or similar in your start menu.
+
+From a (non-admin) Command Prompt window based on what you want to target:
+
+- `build.cmd /p:TargetArchitecture=arm`
+- `build.cmd /p:TargetArchitecture=arm64`
+
+You can then pack them into nugets, pick the same target architecture you built with:
+
+- `build.cmd /p:TargetArchitecture=arm -pack`
+- `build.cmd /p:TargetArchitecture=arm64 -pack`
+
## Running Tests
### Running tests from Visual Studio
diff --git a/eng/common/cross/build-rootfs.sh b/eng/common/cross/build-rootfs.sh
index 81e641a57b..6969834f85 100755
--- a/eng/common/cross/build-rootfs.sh
+++ b/eng/common/cross/build-rootfs.sh
@@ -57,6 +57,10 @@ __UbuntuPackages+=" libssl-dev"
__UbuntuPackages+=" zlib1g-dev"
__UbuntuPackages+=" libldap2-dev"
+# ML.NET dependencies
+__UbuntuPackages+=" libomp5"
+__UbuntuPackages+=" libomp-dev"
+
__AlpinePackages+=" curl-dev"
__AlpinePackages+=" krb5-dev"
__AlpinePackages+=" openssl-dev"
diff --git a/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs b/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs
index f7deea1612..ebfc2d6006 100644
--- a/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/DenseIntArray.cs
@@ -24,10 +24,11 @@ internal abstract class DenseIntArray : IntArray, IIntArrayForwardIndexer
{
public override IntArrayType Type { get { return IntArrayType.Dense; } }
- protected DenseIntArray(int length)
+ protected DenseIntArray(int length, PerformSumup sumupHandler = null)
{
Contracts.Assert(length >= 0);
Length = length;
+ SumupHandler = sumupHandler;
}
public override int Length { get; }
@@ -70,7 +71,6 @@ public override IntArray[] Split(int[][] assignment)
return newArrays;
}
-#if USE_FASTTREENATIVE
internal const string NativePath = "FastTreeNative";
[DllImport(NativePath), SuppressUnmanagedCodeSecurity]
private static extern unsafe int C_Sumup_float(
@@ -111,8 +111,6 @@ protected static unsafe void SumupCPlusPlusDense(SumupInputData input, FeatureHi
}
}
-#endif
-
public override IIntArrayForwardIndexer GetIndexer()
{
return this;
@@ -391,18 +389,21 @@ public Dense8BitIntArray(int len)
: base(len)
{
_data = new byte[len];
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense8BitIntArray(byte[] buffer, ref int position)
: base(buffer.ToInt(ref position))
{
_data = buffer.ToByteArray(ref position);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense8BitIntArray(int len, IEnumerable values)
: base(len)
{
_data = values.Select(i => (byte)i).ToArray(len);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
///
@@ -445,8 +446,7 @@ public override unsafe int this[int index]
}
}
-#if USE_FASTTREENATIVE
- public override void Sumup(SumupInputData input, FeatureHistogram histogram)
+ private void SumupNative(SumupInputData input, FeatureHistogram histogram)
{
unsafe
{
@@ -456,7 +456,8 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
}
}
}
-#endif
+
+ public override void Sumup(SumupInputData input, FeatureHistogram histogram) => SumupHandler(input, histogram);
}
///
@@ -475,12 +476,14 @@ public Dense4BitIntArray(int len)
: base(len)
{
_data = new byte[(len + 1) / 2]; // Even length = half the bytes. Odd length = half the bytes+0.5.
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense4BitIntArray(int len, IEnumerable values)
: base(len)
{
_data = new byte[(len + 1) / 2];
+ SetupSumupHandler(SumupNative, base.Sumup);
int currentIndex = 0;
bool upper = true;
@@ -505,6 +508,7 @@ public Dense4BitIntArray(byte[] buffer, ref int position)
: base(buffer.ToInt(ref position))
{
_data = buffer.ToByteArray(ref position);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
///
@@ -565,8 +569,7 @@ public override unsafe int this[int index]
}
}
-#if USE_FASTTREENATIVE
- public override void Sumup(SumupInputData input, FeatureHistogram histogram)
+ public void SumupNative(SumupInputData input, FeatureHistogram histogram)
{
unsafe
{
@@ -576,7 +579,8 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
}
}
}
-#endif
+
+ public override void Sumup(SumupInputData input, FeatureHistogram histogram) => SumupHandler(input, histogram);
}
///
@@ -592,18 +596,21 @@ public Dense16BitIntArray(int len)
: base(len)
{
_data = new ushort[len];
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense16BitIntArray(int len, IEnumerable values)
: base(len)
{
_data = values.Select(i => (ushort)i).ToArray(len);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense16BitIntArray(byte[] buffer, ref int position)
: base(buffer.ToInt(ref position))
{
_data = buffer.ToUShortArray(ref position);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public override unsafe void Callback(Action callback)
@@ -648,8 +655,8 @@ public override unsafe int this[int index]
_data[index] = (ushort)value;
}
}
-#if USE_FASTTREENATIVE
- public override void Sumup(SumupInputData input, FeatureHistogram histogram)
+
+ public void SumupNative(SumupInputData input, FeatureHistogram histogram)
{
unsafe
{
@@ -660,7 +667,9 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
}
}
}
-#endif
+
+ public override void Sumup(SumupInputData input, FeatureHistogram histogram) => SumupHandler(input, histogram);
+
}
///
@@ -676,18 +685,21 @@ public Dense32BitIntArray(int len)
: base(len)
{
_data = new int[len];
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense32BitIntArray(int len, IEnumerable values)
: base(len)
{
_data = values.ToArray(len);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public Dense32BitIntArray(byte[] buffer, ref int position)
: base(buffer.ToInt(ref position))
{
_data = buffer.ToIntArray(ref position);
+ SetupSumupHandler(SumupNative, base.Sumup);
}
public override unsafe void Callback(Action callback)
@@ -733,8 +745,7 @@ public override int this[int index]
}
}
-#if USE_FASTTREENATIVE
- public override void Sumup(SumupInputData input, FeatureHistogram histogram)
+ public void SumupNative(SumupInputData input, FeatureHistogram histogram)
{
unsafe
{
@@ -745,6 +756,7 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
}
}
}
-#endif
+
+ public override void Sumup(SumupInputData input, FeatureHistogram histogram) => SumupHandler(input, histogram);
}
}
diff --git a/src/Microsoft.ML.FastTree/Dataset/IntArray.cs b/src/Microsoft.ML.FastTree/Dataset/IntArray.cs
index a980b760b6..5401667c5a 100644
--- a/src/Microsoft.ML.FastTree/Dataset/IntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/IntArray.cs
@@ -5,6 +5,7 @@
using System;
using System.Collections.Generic;
using System.Linq;
+using System.Runtime.InteropServices;
using Microsoft.ML.Runtime;
namespace Microsoft.ML.Trainers.FastTree
@@ -32,6 +33,12 @@ internal abstract class IntArray : IEnumerable
///
public abstract int Length { get; }
+ ///
+ /// Bool that checks if we are on x86/x64 so we know if we should use the native code
+ /// or the managed fallbacks.
+ ///
+ public static bool UseFastTreeNative => RuntimeInformation.ProcessArchitecture == Architecture.X64 || RuntimeInformation.ProcessArchitecture == Architecture.X86;
+
///
/// Returns the number of bytes written by the member ToByteArray()
///
@@ -198,6 +205,17 @@ public static IntArray New(byte[] buffer, ref int position)
/// An indexer into the array
public abstract IIntArrayForwardIndexer GetIndexer();
+ // Used in the child classes so we can set either the native or managed Sumup method one time and then
+ // never have to check again.
+ protected delegate void PerformSumup(SumupInputData input, FeatureHistogram histogram);
+
+ // Handler so the child classes don't have to redefine it. If they don't have different logic for native vs managed
+ // code then they don't need to use this.
+ protected PerformSumup SumupHandler { get; set; }
+
+ // Helper to setup the SumupHandler for the derived classes that need it.
+ protected void SetupSumupHandler(PerformSumup native, PerformSumup managed) => SumupHandler = UseFastTreeNative ? native : managed;
+
public virtual void Sumup(SumupInputData input, FeatureHistogram histogram)
{
Contracts.Assert((input.Weights == null) == (histogram.SumWeightsByBin == null));
@@ -313,7 +331,7 @@ public IntArray Compress(uint[] workarray = null)
int bits = SegmentIntArray.BitsForValue((uint)maxval);
if (bits <= 21)
{
- SegmentIntArray.SegmentFindOptimalPath(workarray, Length,
+ SegmentIntArray.SegmentFindOptimalPath.Value(workarray, Length,
bits, out segBits, out segTransitions);
}
}
diff --git a/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs b/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs
index 7fee71b1df..d3589f1e20 100644
--- a/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/SegmentIntArray.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
+using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
@@ -40,10 +41,38 @@ public override IntArrayType Type
get { return IntArrayType.Segmented; }
}
+ // Delegate definitions so we can store a reference to the native or managed method so we only have to check it once.
+ public delegate void PerformSegmentFindOptimalPath(uint[] array, int len, int bitsNeeded, out long bits, out int transitions);
+ public delegate void PerformSegmentFindOptimalCost(uint[] array, int len, int bitsNeeded, out long bits);
+
+ ///
+ /// Used so we can set either the native or managed SegmentFindOptimalCost method one time and then
+ /// never have to check again.
+ ///
+ public static Lazy SegmentFindOptimalCost = new(() => {
+ if (UseFastTreeNative)
+ return NativeSegmentFindOptimalCost;
+ else
+ return ManagedSegmentFindOptimalCost;
+ });
+
+ ///
+ /// Used so we can set either the native or managed SegmentFindOptimalPath method one time and then
+ /// never have to check again.
+ ///
+ public static Lazy SegmentFindOptimalPath = new(() => {
+ if (UseFastTreeNative)
+ return NativeSegmentFindOptimalPath;
+ else
+ return ManagedSegmentFindOptimalPath;
+ });
+
public SegmentIntArray(int length, IEnumerable values)
{
using (Timer.Time(TimerEvent.SparseConstruction))
{
+ SetupSumupHandler(SumupCPlusPlus, base.Sumup);
+
uint[] vals = new uint[length];
uint pos = 0;
uint max = 0;
@@ -65,7 +94,7 @@ public SegmentIntArray(int length, IEnumerable values)
int maxbits = BitsForValue(max);
int transitions;
long bits;
- SegmentFindOptimalPath(vals, vals.Length, maxbits, out bits, out transitions);
+ SegmentFindOptimalPath.Value(vals, vals.Length, maxbits, out bits, out transitions);
var b = FromWorkArray(vals, vals.Length, bits, transitions);
_segType = b._segType;
_segLength = b._segLength;
@@ -388,8 +417,7 @@ public static SegmentIntArray FromWorkArray(uint[] workArray, int len, long bits
return new SegmentIntArray(st, sl, data, len);
}
-#if USE_FASTTREENATIVE
- public static void SegmentFindOptimalPath(uint[] array, int len, int bitsNeeded, out long bits, out int transitions)
+ public static void NativeSegmentFindOptimalPath(uint[] array, int len, int bitsNeeded, out long bits, out int transitions)
{
if (bitsNeeded <= 15)
{
@@ -409,7 +437,7 @@ public static void SegmentFindOptimalPath(uint[] array, int len, int bitsNeeded,
}
}
- public static void SegmentFindOptimalCost(uint[] array, int len, int bitsNeeded, out long bits)
+ public static void NativeSegmentFindOptimalCost(uint[] array, int len, int bitsNeeded, out long bits)
{
if (bitsNeeded <= 15)
{
@@ -546,20 +574,18 @@ public unsafe void SumupCPlusPlus(SumupInputData input, FeatureHistogram histogr
}
}
}
-#else // when not USE_FASTTREENATIVE
- public static void SegmentFindOptimalPath(uint[] array, int len, int bitsNeeded, out long bits, out int transitions)
+ public static void ManagedSegmentFindOptimalPath(uint[] array, int len, int bitsNeeded, out long bits, out int transitions)
{
uint max;
StatsOfBestEncoding(array, bitsNeeded, true, out bits, out transitions, out max);
}
- public static void SegmentFindOptimalCost(uint[] array, int len, int bitsNeeded, out long bits)
+ public static void ManagedSegmentFindOptimalCost(uint[] array, int len, int bitsNeeded, out long bits)
{
int transitions;
uint max;
StatsOfBestEncoding(array, bitsNeeded, false, out bits, out transitions, out max);
}
-#endif // USE_FASTTREENATIVE
public override void Sumup(SumupInputData input, FeatureHistogram histogram)
{
@@ -567,11 +593,7 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
{
if (_length == 0)
return;
-#if USE_FASTTREENATIVE
- SumupCPlusPlus(input, histogram);
-#else
- base.Sumup(input, histogram);
-#endif
+ SumupHandler(input, histogram);
}
}
diff --git a/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs b/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs
index 7925893e4f..165dd2ea8a 100644
--- a/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs
+++ b/src/Microsoft.ML.FastTree/Dataset/SparseIntArray.cs
@@ -459,25 +459,26 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
{
using (Timer.Time(TimerEvent.SumupSparse))
{
-#if USE_FASTTREENATIVE
- var callbackIntArray = _values as DenseDataCallbackIntArray;
- if (callbackIntArray != null)
- {
- unsafe
+ if (UseFastTreeNative) {
+ var callbackIntArray = _values as DenseDataCallbackIntArray;
+ if (callbackIntArray != null)
{
- fixed (byte* pDeltas = _deltas)
+ unsafe
{
- byte* pDeltas2 = pDeltas;
- callbackIntArray.Callback(pValues =>
+ fixed (byte* pDeltas = _deltas)
{
- SumupCPlusPlusSparse(input, histogram, (byte*)pValues, pDeltas2, _deltas.Length,
- _values.BitsPerItem);
- });
+ byte* pDeltas2 = pDeltas;
+ callbackIntArray.Callback(pValues =>
+ {
+ SumupCPlusPlusSparse(input, histogram, (byte*)pValues, pDeltas2, _deltas.Length,
+ _values.BitsPerItem);
+ });
+ }
}
+ return;
}
- return;
}
-#endif
+
if (input.DocIndices == null)
SumupRoot(input, histogram);
else
@@ -485,7 +486,6 @@ public override void Sumup(SumupInputData input, FeatureHistogram histogram)
}
}
-#if USE_FASTTREENATIVE
internal const string NativePath = "FastTreeNative";
[DllImport(NativePath), SuppressUnmanagedCodeSecurity]
private static extern unsafe int C_SumupDeltaSparse_float(int numBits, byte* pValues, byte* pDeltas, int numDeltas, int* pIndices, float* pSampleOutputs, double* pSampleOutputWeights,
@@ -519,7 +519,6 @@ private unsafe void SumupCPlusPlusSparse(SumupInputData input, FeatureHistogram
throw Contracts.Except("CSumup sumupdeltasparse {0}", rv);
}
}
-#endif
private class DeltaSparseIntArrayIndexer : IIntArrayForwardIndexer
{
diff --git a/src/Microsoft.ML.FastTree/FastTreeRanking.cs b/src/Microsoft.ML.FastTree/FastTreeRanking.cs
index 8bea39ec31..57b6d2df1d 100644
--- a/src/Microsoft.ML.FastTree/FastTreeRanking.cs
+++ b/src/Microsoft.ML.FastTree/FastTreeRanking.cs
@@ -792,119 +792,122 @@ protected override void GetGradientInOneQuery(int query, int threadIndex)
{
// calculates the permutation that orders "scores" in descending order, without modifying "scores"
Array.Copy(_oneTwoThree, permutation, numDocuments);
-#if USE_FASTTREENATIVE
- PermutationSort(permutation, scoresToUse, labels, numDocuments, begin);
- // Get how far about baseline our current
- double baselineDcgGap = 0.0;
- //baselineDCGGap = ((new Random(query)).NextDouble() * 2 - 1)/inverseMaxDCG; // THIS IS EVIL CODE REMOVE LATER
- // Keep track of top 3 labels for later use
- GetTopQueryLabels(query, permutation, true);
-
- if (_useShiftedNdcg)
+ if (IntArray.UseFastTreeNative)
{
- // Set non-best (rank-wise) duplicates to be ignored. Set Score to MinValue, Label to 0
- IgnoreNonBestDuplicates(labels, scoresToUse, permutation, Dataset.DupeIds, begin, numDocuments);
- }
+ PermutationSort(permutation, scoresToUse, labels, numDocuments, begin);
+ // Get how far above baseline our current
+ double baselineDcgGap = 0.0;
+ //baselineDCGGap = ((new Random(query)).NextDouble() * 2 - 1)/inverseMaxDCG; // THIS IS EVIL CODE REMOVE LATER
+ // Keep track of top 3 labels for later use
+ GetTopQueryLabels(query, permutation, true);
- int numActualResults = numDocuments;
+ if (_useShiftedNdcg)
+ {
+ // Set non-best (rank-wise) duplicates to be ignored. Set Score to MinValue, Label to 0
+ IgnoreNonBestDuplicates(labels, scoresToUse, permutation, Dataset.DupeIds, begin, numDocuments);
+ }
- // If the const function is ContinuousWeightedRanknet, update output scores
- if (_costFunctionParam == 'c')
- {
- for (int i = begin; i < begin + numDocuments; ++i)
+ int numActualResults = numDocuments;
+
+ // If the const function is ContinuousWeightedRanknet, update output scores
+ if (_costFunctionParam == 'c')
{
- if (pScores[i] == double.MinValue)
- {
- numActualResults--;
- }
- else
+ for (int i = begin; i < begin + numDocuments; ++i)
{
- pScores[i] = pScores[i] * (1.0 - pLabels[i] * 1.0 / (20.0 * Dataset.DatasetSkeleton.LabelGainMap.Length));
+ if (pScores[i] == double.MinValue)
+ {
+ numActualResults--;
+ }
+ else
+ {
+ pScores[i] = pScores[i] * (1.0 - pLabels[i] * 1.0 / (20.0 * Dataset.DatasetSkeleton.LabelGainMap.Length));
+ }
}
}
- }
- // Continuous cost function and shifted NDCG require a re-sort and recomputation of maxDCG
- // (Change of scores in the former and scores and labels in the latter)
- if (!_useDcg && (_costFunctionParam == 'c' || _useShiftedNdcg))
- {
- PermutationSort(permutation, scoresToUse, labels, numDocuments, begin);
- inverseMaxDcg = 1.0 / DcgCalculator.MaxDcgQuery(labels, begin, numDocuments, numDocuments, _labelCounts[query]);
- }
- // A constant related to secondary labels, which does not exist in the current codebase.
- const bool secondaryIsolabelExclusive = false;
- GetDerivatives(numDocuments, begin, pPermutation, pLabels,
- pScores, pLambdas, pWeights, pDiscount,
- inverseMaxDcg, pGainLabels,
- _secondaryMetricShare, secondaryIsolabelExclusive, secondaryInverseMaxDcg, pSecondaryGains,
- pSigmoidTable, _minScore, _maxScore, _sigmoidTable.Length, _scoreToSigmoidTableFactor,
- _costFunctionParam, _distanceWeight2, numActualResults, &lambdaSum, double.MinValue,
- _baselineAlphaCurrent, baselineDcgGap);
-
-#else
- if (_useShiftedNdcg || _costFunctionParam == 'c' || _distanceWeight2 || _normalizeQueryLambdas)
- {
- throw new Exception("Shifted NDCG / ContinuousWeightedRanknet / distanceWeight2 / normalized lambdas are only supported by unmanaged code");
+ // Continuous cost function and shifted NDCG require a re-sort and recomputation of maxDCG
+ // (Change of scores in the former and scores and labels in the latter)
+ if (!_useDcg && (_costFunctionParam == 'c' || _useShiftedNdcg))
+ {
+ PermutationSort(permutation, scoresToUse, labels, numDocuments, begin);
+ inverseMaxDcg = 1.0 / DcgCalculator.MaxDcgQuery(labels, begin, numDocuments, numDocuments, _labelCounts[query]);
+ }
+ // A constant related to secondary labels, which does not exist in the current codebase.
+ const bool secondaryIsolabelExclusive = false;
+ GetDerivatives(numDocuments, begin, pPermutation, pLabels,
+ pScores, pLambdas, pWeights, pDiscount,
+ inverseMaxDcg, pGainLabels,
+ _secondaryMetricShare, secondaryIsolabelExclusive, secondaryInverseMaxDcg, pSecondaryGains,
+ pSigmoidTable, _minScore, _maxScore, _sigmoidTable.Length, _scoreToSigmoidTableFactor,
+ _costFunctionParam, _distanceWeight2, numActualResults, &lambdaSum, double.MinValue,
+ _baselineAlphaCurrent, baselineDcgGap);
}
-
- var comparer = _comparers[threadIndex];
- comparer.Scores = scoresToUse;
- comparer.Labels = labels;
- comparer.ScoresOffset = begin;
- comparer.LabelsOffset = begin;
- Array.Sort(permutation, 0, numDocuments, comparer);
-
- // go over all pairs
- double scoreHighMinusLow;
- double lambdaP;
- double weightP;
- double deltaNdcgP;
- for (int i = 0; i < numDocuments; ++i)
+ else
{
- int high = begin + pPermutation[i];
- if (pLabels[high] == 0)
- continue;
- double deltaLambdasHigh = 0;
- double deltaWeightsHigh = 0;
+ if (_useShiftedNdcg || _costFunctionParam == 'c' || _distanceWeight2 || _normalizeQueryLambdas)
+ {
+ throw new Exception("Shifted NDCG / ContinuousWeightedRanknet / distanceWeight2 / normalized lambdas are only supported by unmanaged code");
+ }
- for (int j = 0; j < numDocuments; ++j)
+ var comparer = _comparers[threadIndex];
+ comparer.Scores = scoresToUse;
+ comparer.Labels = labels;
+ comparer.ScoresOffset = begin;
+ comparer.LabelsOffset = begin;
+ Array.Sort(permutation, 0, numDocuments, comparer);
+
+ // go over all pairs
+ double scoreHighMinusLow;
+ double lambdaP;
+ double weightP;
+ double deltaNdcgP;
+ for (int i = 0; i < numDocuments; ++i)
{
- // only consider pairs with different labels, where "high" has a higher label than "low"
- if (i == j)
- continue;
- int low = begin + pPermutation[j];
- if (pLabels[high] <= pLabels[low])
+ int high = begin + pPermutation[i];
+ if (pLabels[high] == 0)
continue;
+ double deltaLambdasHigh = 0;
+ double deltaWeightsHigh = 0;
- // calculate the lambdaP for this pair
- scoreHighMinusLow = pScores[high] - pScores[low];
-
- if (scoreHighMinusLow <= _minScore)
- lambdaP = _minSigmoid;
- else if (scoreHighMinusLow >= _maxScore)
- lambdaP = _maxSigmoid;
- else
- lambdaP = _sigmoidTable[(int)((scoreHighMinusLow - _minScore) * _scoreToSigmoidTableFactor)];
-
- weightP = lambdaP * (2.0 - lambdaP);
-
- // calculate the deltaNDCGP for this pair
- deltaNdcgP =
- (pGain[pLabels[high]] - pGain[pLabels[low]]) *
- Math.Abs((pDiscount[i] - pDiscount[j])) *
- inverseMaxDcg;
-
- // update lambdas and weights
- deltaLambdasHigh += lambdaP * deltaNdcgP;
- pLambdas[low] -= lambdaP * deltaNdcgP;
- deltaWeightsHigh += weightP * deltaNdcgP;
- pWeights[low] += weightP * deltaNdcgP;
+ for (int j = 0; j < numDocuments; ++j)
+ {
+ // only consider pairs with different labels, where "high" has a higher label than "low"
+ if (i == j)
+ continue;
+ int low = begin + pPermutation[j];
+ if (pLabels[high] <= pLabels[low])
+ continue;
+
+ // calculate the lambdaP for this pair
+ scoreHighMinusLow = pScores[high] - pScores[low];
+
+ if (scoreHighMinusLow <= _minScore)
+ lambdaP = _minSigmoid;
+ else if (scoreHighMinusLow >= _maxScore)
+ lambdaP = _maxSigmoid;
+ else
+ lambdaP = _sigmoidTable[(int)((scoreHighMinusLow - _minScore) * _scoreToSigmoidTableFactor)];
+
+ weightP = lambdaP * (2.0 - lambdaP);
+
+ // calculate the deltaNDCGP for this pair
+ deltaNdcgP =
+ (pGain[pLabels[high]] - pGain[pLabels[low]]) *
+ Math.Abs((pDiscount[i] - pDiscount[j])) *
+ inverseMaxDcg;
+
+ // update lambdas and weights
+ deltaLambdasHigh += lambdaP * deltaNdcgP;
+ pLambdas[low] -= lambdaP * deltaNdcgP;
+ deltaWeightsHigh += weightP * deltaNdcgP;
+ pWeights[low] += weightP * deltaNdcgP;
+ }
+ pLambdas[high] += deltaLambdasHigh;
+ pWeights[high] += deltaWeightsHigh;
}
- pLambdas[high] += deltaLambdasHigh;
- pWeights[high] += deltaWeightsHigh;
}
-#endif
+
if (_normalizeQueryLambdas)
{
if (lambdaSum > 0)
diff --git a/src/Microsoft.ML.FastTree/Microsoft.ML.FastTree.csproj b/src/Microsoft.ML.FastTree/Microsoft.ML.FastTree.csproj
index 144bee96f6..52cc2f11a5 100644
--- a/src/Microsoft.ML.FastTree/Microsoft.ML.FastTree.csproj
+++ b/src/Microsoft.ML.FastTree/Microsoft.ML.FastTree.csproj
@@ -4,7 +4,7 @@
netstandard2.0
Microsoft.ML.FastTree
ML.NET component for FastTree
- $(DefineConstants);USE_FASTTREENATIVE;NO_STORE;CORECLR
+ $(DefineConstants);NO_STORE;CORECLR
true
$(TargetsForTfmSpecificBuildOutput);CopyProjectReferencesToPackage
diff --git a/src/Native/CMakeLists.txt b/src/Native/CMakeLists.txt
index 716636c00e..4d7da7058c 100644
--- a/src/Native/CMakeLists.txt
+++ b/src/Native/CMakeLists.txt
@@ -80,6 +80,11 @@ else()
add_definitions(-Werror) # treat warnings as errors
endif()
+# Set the architecture we are compiling for on APPLE. This lets you cross target from x86_64 -> arm64.
+if(APPLE)
+ set(CMAKE_OSX_ARCHITECTURES ${ARCHITECTURE})
+endif()
+
# Older CMake doesn't support CMAKE_CXX_STANDARD and GCC/Clang need a switch to enable C++ 11
if(${CMAKE_CXX_COMPILER_ID} MATCHES "(Clang|GNU)")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
@@ -193,9 +198,14 @@ function(install_library_and_symbols targetName)
endif()
endfunction()
-add_subdirectory(CpuMathNative)
-add_subdirectory(FastTreeNative)
+if(NOT ${ARCHITECTURE} MATCHES "arm.*")
+ add_subdirectory(CpuMathNative)
+ add_subdirectory(FastTreeNative)
+ add_subdirectory(MklProxyNative)
+ # TODO: once we fix the 4 intel MKL methods, SymSgdNative will need to go back in.
+ add_subdirectory(SymSgdNative)
+endif()
+
add_subdirectory(LdaNative)
add_subdirectory(MatrixFactorizationNative)
-add_subdirectory(SymSgdNative)
-add_subdirectory(MklProxyNative)
+
diff --git a/src/Native/MatrixFactorizationNative/CMakeLists.txt b/src/Native/MatrixFactorizationNative/CMakeLists.txt
index 8fed1afc90..301433d2e0 100644
--- a/src/Native/MatrixFactorizationNative/CMakeLists.txt
+++ b/src/Native/MatrixFactorizationNative/CMakeLists.txt
@@ -1,16 +1,32 @@
project (MatrixFactorizationNative)
add_definitions(-D_SCL_SECURE_NO_WARNINGS)
add_definitions(-DUSEOMP)
-add_definitions(-DUSESSE)
+
+if(NOT ${ARCHITECTURE} MATCHES "arm.*")
+ add_definitions(-DUSESSE)
+endif()
include_directories(libmf)
if(UNIX)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3 -pthread -std=c++0x -march=native")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O3 -pthread -std=c++0x")
+
+ if(NOT ${ARCHITECTURE} MATCHES "arm.*")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
+ endif()
+
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fopenmp")
if (APPLE)
- include_directories("/usr/local/opt/libomp/include")
- link_directories("/usr/local/opt/libomp/lib")
+ # Apple silicon and Intel macs store brew in different locations, this finds it no matter where it is.
+ execute_process(
+ COMMAND brew --prefix libomp
+ RESULT_VARIABLE BREW_LIBOMP
+ OUTPUT_VARIABLE BREW_LIBOMP_PREFIX
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+
+ include_directories("${BREW_LIBOMP_PREFIX}/include")
+ link_directories("${BREW_LIBOMP_PREFIX}/lib")
endif()
endif()
diff --git a/src/Native/Native.proj b/src/Native/Native.proj
index 46d6006eb2..4955604c9c 100644
--- a/src/Native/Native.proj
+++ b/src/Native/Native.proj
@@ -82,26 +82,32 @@
-
-
-
-
+
-
-
@@ -112,9 +118,10 @@
-
-
diff --git a/src/Native/SymSgdNative/CMakeLists.txt b/src/Native/SymSgdNative/CMakeLists.txt
index 80665883b9..f40d1a46b4 100644
--- a/src/Native/SymSgdNative/CMakeLists.txt
+++ b/src/Native/SymSgdNative/CMakeLists.txt
@@ -33,7 +33,9 @@ else()
endif()
endif()
-find_library(MKL_LIBRARY MklImports HINTS ${MKL_LIB_PATH})
+if(NOT ${ARCHITECTURE} MATCHES "arm.*")
+ find_library(MKL_LIBRARY MklImports HINTS ${MKL_LIB_PATH})
+endif()
add_definitions(-DUSE_OMP)
add_library(SymSgdNative SHARED ${SOURCES} ${RESOURCES})
diff --git a/src/Native/build.cmd b/src/Native/build.cmd
index 529006a03f..f4e6592e31 100644
--- a/src/Native/build.cmd
+++ b/src/Native/build.cmd
@@ -29,6 +29,8 @@ if /i [%1] == [Debug-netfx] ( set CMAKE_BUILD_TYPE=Debug-netfx&&shift&goto
if /i [%1] == [x86] ( set __BuildArch=x86&&set __VCBuildArch=x86&&shift&goto Arg_Loop)
if /i [%1] == [x64] ( set __BuildArch=x64&&set __VCBuildArch=x86_amd64&&shift&goto Arg_Loop)
if /i [%1] == [amd64] ( set __BuildArch=x64&&set __VCBuildArch=x86_amd64&&shift&goto Arg_Loop)
+if /i [%1] == [arm] ( set __BuildArch=arm&&set __VCBuildArch=x86_arm&&shift&goto Arg_Loop)
+if /i [%1] == [arm64] ( set __BuildArch=arm64&&set __VCBuildArch=x86_arm64&&shift&goto Arg_Loop)
if /i [%1] == [--mkllibpath] ( set MKL_LIB_PATH=%2&&shift&goto Arg_Loop)
shift
@@ -67,30 +69,26 @@ exit /b 1
:: Setup vars for VS2019
set __PlatformToolset=v142
set __VSVersion=16 2019
-if NOT "%__BuildArch%" == "arm64" (
- :: Set the environment for the native build
- call "%VS160COMNTOOLS%..\..\VC\Auxiliary\Build\vcvarsall.bat" %__VCBuildArch%
-)
+:: Set the environment for the native build
+call "%VS160COMNTOOLS%..\..\VC\Auxiliary\Build\vcvarsall.bat" %__VCBuildArch%
+
goto :SetupDirs
:VS2017
:: Setup vars for VS2017
set __PlatformToolset=v141
set __VSVersion=15 2017
-if NOT "%__BuildArch%" == "arm64" (
- :: Set the environment for the native build
- call "%VS150COMNTOOLS%..\..\VC\Auxiliary\Build\vcvarsall.bat" %__VCBuildArch%
-)
+:: Set the environment for the native build
+call "%VS150COMNTOOLS%..\..\VC\Auxiliary\Build\vcvarsall.bat" %__VCBuildArch%
+
goto :SetupDirs
:VS2015
:: Setup vars for VS2015build
set __PlatformToolset=v140
set __VSVersion=14 2015
-if NOT "%__BuildArch%" == "arm64" (
- :: Set the environment for the native build
- call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" %__VCBuildArch%
-)
+:: Set the environment for the native build
+call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" %__VCBuildArch%
:SetupDirs
:: Setup to cmake the native components
diff --git a/src/Native/build.sh b/src/Native/build.sh
index cbc7582ce1..2b126d9bf5 100755
--- a/src/Native/build.sh
+++ b/src/Native/build.sh
@@ -53,7 +53,7 @@ while [ "$1" != "" ]; do
--mkllibrpath)
shift
__mkllibrpath=$1
- ;;
+ ;;
--stripsymbols)
__strip_argument="-DSTRIP_SYMBOLS=true"
;;
@@ -103,6 +103,23 @@ fi
__cmake_defines="${__cmake_defines} -DVERSION_FILE_PATH:STRING=${__versionSourceFile}"
+OS_ARCH=$(uname -m)
+OS=$(uname)
+
+# If we are cross compiling on Linux we need to set the CMAKE_TOOLCHAIN_FILE
+if [[ ( $OS_ARCH == "amd64" || $OS_ARCH == "x86_64" ) && ( $__build_arch == "arm64" || $__build_arch == "arm" ) && $OS != "Darwin" ]] ; then
+ __cmake_defines="${__cmake_defines} -DCMAKE_TOOLCHAIN_FILE=$RootRepo/eng/common/cross/toolchain.cmake"
+ export TARGET_BUILD_ARCH=$__build_arch
+
+# If we are on a Mac we need to let it know the cross architecture to build for.
+# We use x64 for our 64 bit code, but Mac defines it as x86_64.
+elif [[ $OS == "Darwin" && $__build_arch == "x64" ]] ; then
+ __build_arch="x86_64"
+fi
+
+# Set the ARCHITECTURE for all builds
+__cmake_defines="${__cmake_defines} -DARCHITECTURE=${__build_arch}"
+
cd "$__IntermediatesDir"
echo "Building Machine Learning native components from $DIR to $(pwd)"
diff --git a/src/Native/gen-buildsys-win.bat b/src/Native/gen-buildsys-win.bat
index 78f42a9f78..62ef6c4967 100644
--- a/src/Native/gen-buildsys-win.bat
+++ b/src/Native/gen-buildsys-win.bat
@@ -28,7 +28,9 @@ popd
:DoGen
if /i "%3" == "x64" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A x64)
if /i "%3" == "x86" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A Win32)
-"%CMakePath%" "-DCMAKE_BUILD_TYPE=%CMAKE_BUILD_TYPE%" "-DCMAKE_INSTALL_PREFIX=%__CMakeBinDir%" "-DMKL_LIB_PATH=%MKL_LIB_PATH%" -G "Visual Studio %__VSString%" %__ExtraCmakeParams% -B. -H%1
+if /i "%3" == "arm64" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A arm64)
+if /i "%3" == "arm" (set __ExtraCmakeParams=%__ExtraCmakeParams% -A arm)
+"%CMakePath%" "-DCMAKE_BUILD_TYPE=%CMAKE_BUILD_TYPE%" "-DCMAKE_INSTALL_PREFIX=%__CMakeBinDir%" "-DMKL_LIB_PATH=%MKL_LIB_PATH%" "-DARCHITECTURE=%3" -G "Visual Studio %__VSString%" %__ExtraCmakeParams% -B. -H%1
endlocal
GOTO :DONE
diff --git a/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs b/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs
index a6f48005f0..f59ca10882 100644
--- a/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs
+++ b/test/Microsoft.ML.Predictor.Tests/TestPredictors.cs
@@ -487,10 +487,10 @@ public void LightGBMClassificationTest()
///
/// This test checks that the run-time behavior of LightGBM does not change by modifying the flags
- /// used by LightGBM with , and that this change does not affect
- /// the features extracted during validation. This is done by checking that an older LightGbm model
+ /// used by LightGBM with , and that this change does not affect
+ /// the features extracted during validation. This is done by checking that an older LightGbm model
/// trained with produces the same baselines as it did before this change.
- ///
+ ///
///
[LightGBMFact]
[TestCategory("Binary")]
@@ -1342,7 +1342,7 @@ public void RegressorSyntheticOlsTest()
float diff = scale * (2 * rgen.NextFloat() - 1) / 3;
boundCost += diff * diff;
noisyInstances.Add(new Instance(inst.Features, inst.Label + diff, inst.Name, false) { Id = inst.Id });
- // Make sure this solver also works, when we have
+ // Make sure this solver also works, when we have
if (subdefined && 2 * noisyInstances.Count >= model.Length)
break;
}
@@ -1783,8 +1783,8 @@ public void NnConfigTests()
[TestCategory("Anomaly")]
public void PcaAnomalyTest()
{
- Run_TrainTest(TestLearners.PCAAnomalyDefault, TestDatasets.mnistOneClass, extraSettings: new[] { "loader=text{sparse+}" }, digitsOfPrecision: 5);
- Run_TrainTest(TestLearners.PCAAnomalyNoNorm, TestDatasets.mnistOneClass, extraSettings: new[] { "loader=text{sparse+}" }, digitsOfPrecision: 5);
+ Run_TrainTest(TestLearners.PCAAnomalyDefault, TestDatasets.mnistOneClass, extraSettings: new[] { "loader=text{sparse+}" }, digitsOfPrecision: 4);
+ Run_TrainTest(TestLearners.PCAAnomalyNoNorm, TestDatasets.mnistOneClass, extraSettings: new[] { "loader=text{sparse+}" }, digitsOfPrecision: 4);
// REVIEW: This next test was misbehaving in a strange way that seems to have gone away
// mysteriously (bad build?).
@@ -2030,7 +2030,7 @@ public void StreamingTimeSeriesAnomalyDetectorTest()
#if OLD_TESTS // REVIEW: Need to port Tremble to the new world.
///
- /// A test for tremble binary classifier using logistic regression
+ /// A test for tremble binary classifier using logistic regression
/// in leaf and interior nodes
///
[Fact(Skip = "Need CoreTLC specific baseline update")]
@@ -2049,7 +2049,7 @@ public void BinaryClassifierTrembleTest()
}
///
- /// A test for tremble multi-class classifier using logistic regression
+ /// A test for tremble multi-class classifier using logistic regression
/// in leaf and interior nodes
///
[Fact(Skip = "Need CoreTLC specific baseline update")]
@@ -2074,7 +2074,7 @@ public void MulticlassClassificationTrembleTest()
[TestCategory("TrembleDecisionTree"), Priority(2)]
public void BinaryClassifierDecisionTreeTest()
{
- var binaryPredictors = new[] { TestLearners.BinaryDecisionTreeDefault, TestLearners.BinaryDecisionTreeGini,
+ var binaryPredictors = new[] { TestLearners.BinaryDecisionTreeDefault, TestLearners.BinaryDecisionTreeGini,
TestLearners.BinaryDecisionTreePruning, TestLearners.BinaryDecisionTreeModified };
var binaryClassificationDatasets = new List();
binaryClassificationDatasets.Add(TestDatasets.breastCancer);
@@ -2093,7 +2093,7 @@ public void BinaryClassifierDecisionTreeTest()
[TestCategory("TrembleDecisionTree"), Priority(2)]
public void BinaryClassifierDecisionTreeWeightingTest()
{
- var binaryPredictors = new[] { TestLearners.BinaryDecisionTreeDefault, TestLearners.BinaryDecisionTreeGini,
+ var binaryPredictors = new[] { TestLearners.BinaryDecisionTreeDefault, TestLearners.BinaryDecisionTreeGini,
TestLearners.BinaryDecisionTreePruning, TestLearners.BinaryDecisionTreeModified, TestLearners.BinaryDecisionTreeRewt };
var binaryClassificationDatasets = GetDatasetsForClassificationWeightingPredictorsTest();
RunAllTests(binaryPredictors, binaryClassificationDatasets);
@@ -2108,7 +2108,7 @@ public void BinaryClassifierDecisionTreeWeightingTest()
[TestCategory("TrembleDecisionTree"), Priority(2)]
public void MulticlassClassificationDecisionTreeTest()
{
- var multiClassPredictors = new[] { TestLearners.MulticlassDecisionTreeDefault, TestLearners.MulticlassDecisionTreeGini,
+ var multiClassPredictors = new[] { TestLearners.MulticlassDecisionTreeDefault, TestLearners.MulticlassDecisionTreeGini,
TestLearners.MulticlassDecisionTreePruning, TestLearners.MulticlassDecisionTreeModified };
var multiClassClassificationDatasets = new List();
multiClassClassificationDatasets.Add(TestDatasets.iris);
@@ -2127,7 +2127,7 @@ public void MulticlassClassificationDecisionTreeTest()
[TestCategory("TrembleDecisionTree"), Priority(2)]
public void MulticlassifierDecisionTreeWeightingTest()
{
- var multiClassPredictors = new[] { TestLearners.MulticlassDecisionTreeDefault, TestLearners.MulticlassDecisionTreeGini,
+ var multiClassPredictors = new[] { TestLearners.MulticlassDecisionTreeDefault, TestLearners.MulticlassDecisionTreeGini,
TestLearners.MulticlassDecisionTreePruning, TestLearners.MulticlassDecisionTreeModified };
var binaryClassificationDatasets = new List(GetDatasetsForClassificationWeightingPredictorsTest());
RunAllTests(multiClassPredictors, binaryClassificationDatasets);