From c92df063a9d4d2338da1bf3d676d8063979912a0 Mon Sep 17 00:00:00 2001 From: Ryan Lai Date: Mon, 6 Jan 2020 15:39:29 -0800 Subject: [PATCH 1/6] Add winml macro wrappers on top of google test macros --- winml/test/common/googleTestMacros.h | 27 + winml/test/common/taefTestMacros.h | 0 winml/test/common/test.h | 6 + .../cppwinrt/scenariotestscppwinrt.cpp | 2446 ++++++++--------- .../scenario/cppwinrt/scenariotestscppwinrt.h | 47 + 5 files changed, 1238 insertions(+), 1288 deletions(-) create mode 100644 winml/test/common/googleTestMacros.h create mode 100644 winml/test/common/taefTestMacros.h create mode 100644 winml/test/common/test.h create mode 100644 winml/test/scenario/cppwinrt/scenariotestscppwinrt.h diff --git a/winml/test/common/googleTestMacros.h b/winml/test/common/googleTestMacros.h new file mode 100644 index 0000000000000..f7f8799fb7d2f --- /dev/null +++ b/winml/test/common/googleTestMacros.h @@ -0,0 +1,27 @@ +#include + +#define TEST_GROUP_BEGIN(group_name) +#define TEST_GROUP_END() + +#define WINML_TEST(group_name, test_name, method) \ +static void method(); \ +TEST_F(group_name, test_name) { \ + method(); \ +} + +#define WINML_TEST_CLASS_BEGIN_NO_SETUP(test_class_name) \ + class test_class_name : public ::testing::Test { \ + }; + +#define WINML_TEST_CLASS_BEGIN_WITH_SETUP(test_class_name, setup_method) \ +static void setup_method(); \ + class test_class_name : public ::testing::Test { \ + protected: \ + void SetUp() override { \ + setup_method(); \ + } \ +}; + +#define WINML_TEST_CLASS_END() + +#define WINML_EXPECT_NO_THROW(statement) EXPECT_NO_THROW(statement) \ No newline at end of file diff --git a/winml/test/common/taefTestMacros.h b/winml/test/common/taefTestMacros.h new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/winml/test/common/test.h b/winml/test/common/test.h new file mode 100644 index 0000000000000..6f5306fd50e85 --- /dev/null +++ b/winml/test/common/test.h @@ -0,0 +1,6 @@ +#define BUILD_GOOGLE_TEST +#ifdef 
BUILD_GOOGLE_TEST +#include "googleTestMacros.h" +#elif BUILD_TAEF_TEST +#include "taefTestMacros.h" +#endif \ No newline at end of file diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp index 5100dd08b71d1..87dde2d21caf7 100644 --- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp @@ -30,7 +30,7 @@ #include #include #include - +#include "scenariotestscppwinrt.h" #if __has_include("dxcore.h") #define ENABLE_DXCORE 1 #endif @@ -49,1408 +49,1286 @@ using namespace winrt::Windows::Storage; using namespace winrt::Windows::Storage::Streams; using namespace winrt::Windows::UI::Xaml::Media::Imaging; -class ScenarioCppWinrtTest : public ::testing::Test -{ -protected: - ScenarioCppWinrtTest() - { - init_apartment(); - } -}; +static void ScenarioCppWinrtTestSetup() { + init_apartment(); +} -class ScenarioCppWinrtGpuTest : public ScenarioCppWinrtTest -{ -protected: - void SetUp() override - { - GPUTEST - } +static void ScenarioCppWinrtGpuTestSetup() { + ScenarioCppWinrtTestSetup(); + GPUTEST }; -using ScenarioCppWinrtGpuTestDeathTest = ScenarioCppWinrtGpuTest; - -class ScenarioCppWinrtGpuSkipEdgeCoreTest : public ScenarioCppWinrtGpuTest -{ -protected: - void SetUp() override - { - ScenarioCppWinrtGpuTest::SetUp(); - SKIP_EDGECORE - } + +static void ScenarioCppWinrtGpuSkipEdgeCoreTestSetup() { + ScenarioCppWinrtGpuTestSetup(); + SKIP_EDGECORE }; -TEST_F(ScenarioCppWinrtTest, Sample1) -{ - LearningModel model = nullptr; - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(filePath)); +static void ScenarioCppWinrtTest_Sample1() { + LearningModel model = nullptr; + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(filePath)); } -ILearningModelFeatureValue MakeTensor(const 
ITensorFeatureDescriptor& descriptor) -{ - auto dataType = descriptor.TensorKind(); - std::vector shape; - int64_t size = 1; - for (auto&& dim : descriptor.Shape()) - { - if (dim == -1) dim = 1; - shape.push_back(dim); - size *= dim; - } +ILearningModelFeatureValue MakeTensor(const ITensorFeatureDescriptor& descriptor) { + auto dataType = descriptor.TensorKind(); + std::vector shape; + int64_t size = 1; + for (auto&& dim : descriptor.Shape()) { + if (dim == -1) dim = 1; + shape.push_back(dim); + size *= dim; + } - switch (dataType) - { - case TensorKind::Float: - { - std::vector buffer; - buffer.resize(size); - auto ftv = TensorFloat::CreateFromIterable(shape, winrt::single_threaded_vector(std::move(buffer))); - return ftv; + switch (dataType) { + case TensorKind::Float: { + std::vector buffer; + buffer.resize(size); + auto ftv = TensorFloat::CreateFromIterable(shape, winrt::single_threaded_vector(std::move(buffer))); + return ftv; } default: - throw_hresult(E_NOTIMPL); - break; - } + throw_hresult(E_NOTIMPL); + break; + } } -ILearningModelFeatureValue MakeImage(const IImageFeatureDescriptor& /*descriptor*/, winrt::Windows::Foundation::IInspectable data) -{ - VideoFrame videoFrame = nullptr; - if (data != nullptr) - { - SoftwareBitmap sb = nullptr; - data.as(sb); - videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); - } - else - { - SoftwareBitmap sb = SoftwareBitmap(BitmapPixelFormat::Bgra8, 28, 28); - videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); - } - auto imageValue = ImageFeatureValue::CreateFromVideoFrame(videoFrame); - return imageValue; +ILearningModelFeatureValue MakeImage(const IImageFeatureDescriptor& /*descriptor*/, winrt::Windows::Foundation::IInspectable data) { + VideoFrame videoFrame = nullptr; + if (data != nullptr) { + SoftwareBitmap sb = nullptr; + data.as(sb); + videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); + } else { + SoftwareBitmap sb = SoftwareBitmap(BitmapPixelFormat::Bgra8, 28, 28); + videoFrame = 
VideoFrame::CreateWithSoftwareBitmap(sb); + } + auto imageValue = ImageFeatureValue::CreateFromVideoFrame(videoFrame); + return imageValue; } -ILearningModelFeatureValue FeatureValueFromFeatureValueDescriptor(ILearningModelFeatureDescriptor descriptor, winrt::Windows::Foundation::IInspectable data = nullptr) -{ - auto kind = descriptor.Kind(); - switch (kind) - { - case LearningModelFeatureKind::Image: - { - ImageFeatureDescriptor imageDescriptor = nullptr; - descriptor.as(imageDescriptor); - return MakeImage(imageDescriptor, data); +ILearningModelFeatureValue FeatureValueFromFeatureValueDescriptor(ILearningModelFeatureDescriptor descriptor, winrt::Windows::Foundation::IInspectable data = nullptr) { + auto kind = descriptor.Kind(); + switch (kind) { + case LearningModelFeatureKind::Image: { + ImageFeatureDescriptor imageDescriptor = nullptr; + descriptor.as(imageDescriptor); + return MakeImage(imageDescriptor, data); } case LearningModelFeatureKind::Map: - throw_hresult(E_NOTIMPL); - break; + throw_hresult(E_NOTIMPL); + break; case LearningModelFeatureKind::Sequence: - throw_hresult(E_NOTIMPL); - break; - case LearningModelFeatureKind::Tensor: - { - TensorFeatureDescriptor tensorDescriptor = nullptr; - descriptor.as(tensorDescriptor); - return MakeTensor(tensorDescriptor); + throw_hresult(E_NOTIMPL); + break; + case LearningModelFeatureKind::Tensor: { + TensorFeatureDescriptor tensorDescriptor = nullptr; + descriptor.as(tensorDescriptor); + return MakeTensor(tensorDescriptor); } default: - throw_hresult(E_INVALIDARG); - break; - } + throw_hresult(E_INVALIDARG); + break; + } } // helper method that populates a binding object with default data -static void BindFeatures(LearningModelBinding binding, IVectorView features) -{ - for (auto&& feature : features) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(feature); - // set an actual buffer here. we're using uninitialized data for simplicity. 
- binding.Bind(feature.Name(), featureValue); - } +static void BindFeatures(LearningModelBinding binding, IVectorView features) { + for (auto&& feature : features) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(feature); + // set an actual buffer here. we're using uninitialized data for simplicity. + binding.Bind(feature.Name(), featureValue); + } } //! Scenario1 : Load , bind, eval a model using all the system defaults (easy path) -TEST_F(ScenarioCppWinrtTest, Scenario1LoadBindEvalDefault) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input); - // set an actual buffer here. we're using uninitialized data for simplicity. - binding.Bind(input.Name(), featureValue); - } - // run eval - EXPECT_NO_THROW(session.Evaluate(binding, L"")); +static void ScenarioCppWinrtTest_Scenario1LoadBindEvalDefault() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input); + // set an actual buffer here. we're using uninitialized data for simplicity. 
+ binding.Bind(input.Name(), featureValue); + } + // run eval + WINML_EXPECT_NO_THROW(session.Evaluate(binding, L"")); } //! Scenario2: Load a model from stream // - winRT, and win32 -TEST_F(ScenarioCppWinrtTest, Scenario2LoadModelFromStream) -{ - // get a stream - std::wstring path = FileHelpers::GetModulePath() + L"model.onnx"; - auto storageFile = StorageFile::GetFileFromPathAsync(path).get(); - - // load the stream - Streams::IRandomAccessStreamReference streamref; - storageFile.as(streamref); - - // load a model - LearningModel model = nullptr; - EXPECT_NO_THROW(model = LearningModel::LoadFromStreamAsync(streamref).get()); - EXPECT_TRUE(model != nullptr); +static void ScenarioCppWinrtTest_Scenario2LoadModelFromStream() { + // get a stream + std::wstring path = FileHelpers::GetModulePath() + L"model.onnx"; + auto storageFile = StorageFile::GetFileFromPathAsync(path).get(); + + // load the stream + Streams::IRandomAccessStreamReference streamref; + storageFile.as(streamref); + + // load a model + LearningModel model = nullptr; + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromStreamAsync(streamref).get()); + EXPECT_TRUE(model != nullptr); } //! 
Scenario3: pass a SoftwareBitmap into a model -TEST_F(ScenarioCppWinrtGpuTest, Scenario3SoftwareBitmapInputBinding) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - // load the SoftwareBitmap - SoftwareBitmap sb = FileHelpers::GetSoftwareBitmapFromFile(FileHelpers::GetModulePath() + L"fish.png"); - auto videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); - auto imageValue = ImageFeatureValue::CreateFromVideoFrame(videoFrame); - - EXPECT_NO_THROW(binding.Bind(input.Name(), imageValue)); - } - // run eval - EXPECT_NO_THROW(session.Evaluate(binding, L"")); +static void ScenarioCppWinrtGpuTest_Scenario3SoftwareBitmapInputBinding() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + // load the SoftwareBitmap + SoftwareBitmap sb = FileHelpers::GetSoftwareBitmapFromFile(FileHelpers::GetModulePath() + L"fish.png"); + auto videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); + auto imageValue = ImageFeatureValue::CreateFromVideoFrame(videoFrame); + + WINML_EXPECT_NO_THROW(binding.Bind(input.Name(), imageValue)); + } + // run eval + WINML_EXPECT_NO_THROW(session.Evaluate(binding, L"")); } //! 
Scenario5: run an async eval -winrt::Windows::Foundation::IAsyncOperation DoEvalAsync() -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input); - // set an actual buffer here. we're using uninitialized data for simplicity. - binding.Bind(input.Name(), featureValue); - } - // run eval async - return session.EvaluateAsync(binding, L""); +winrt::Windows::Foundation::IAsyncOperation DoEvalAsync() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input); + // set an actual buffer here. we're using uninitialized data for simplicity. 
+ binding.Bind(input.Name(), featureValue); + } + // run eval async + return session.EvaluateAsync(binding, L""); } -TEST_F(ScenarioCppWinrtTest, Scenario5AsyncEval) -{ - auto task = DoEvalAsync(); +static void ScenarioCppWinrtTest_Scenario5AsyncEval() { + auto task = DoEvalAsync(); - while (task.Status() == winrt::Windows::Foundation::AsyncStatus::Started) - { - std::cout << "Waiting...\n"; - Sleep(30); - } - std::cout << "Done\n"; - EXPECT_NO_THROW(task.get()); + while (task.Status() == winrt::Windows::Foundation::AsyncStatus::Started) { + std::cout << "Waiting...\n"; + Sleep(30); + } + std::cout << "Done\n"; + WINML_EXPECT_NO_THROW(task.get()); } //! Scenario6: use BindInputWithProperties - BitmapBounds, BitmapPixelFormat // apparently this scenario is cut for rs5. - not cut, just rewprked. move props // to the image value when that is checked in. -TEST_F(ScenarioCppWinrtGpuTest, Scenario6BindWithProperties) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - SoftwareBitmap sb = SoftwareBitmap(BitmapPixelFormat::Bgra8, 224, 224); - auto videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); - auto imageValue = ImageFeatureValue::CreateFromVideoFrame(videoFrame); - - PropertySet propertySet; - - // make a BitmapBounds - BitmapBounds bounds; - bounds.X = 0; - bounds.Y = 0; - bounds.Height = 100; - bounds.Width = 100; - - auto bitmapsBoundsProperty = winrt::Windows::Foundation::PropertyValue::CreateUInt32Array({ bounds.X, bounds.Y, bounds.Width, bounds.Height }); - // insert it in the property set - propertySet.Insert(L"BitmapBounds", 
bitmapsBoundsProperty); - - // make a BitmapPixelFormat - BitmapPixelFormat bitmapPixelFormat = BitmapPixelFormat::Bgra8; - // translate it to an int so it can be used as a PropertyValue; - int intFromBitmapPixelFormat = static_cast(bitmapPixelFormat); - auto bitmapPixelFormatProperty = winrt::Windows::Foundation::PropertyValue::CreateInt32(intFromBitmapPixelFormat); - // insert it in the property set - propertySet.Insert(L"BitmapPixelFormat", bitmapPixelFormatProperty); - - // bind with properties - EXPECT_NO_THROW(binding.Bind(input.Name(), imageValue, propertySet)); - } - // run eval - EXPECT_NO_THROW(session.Evaluate(binding, L"")); +static void ScenarioCppWinrtGpuTest_Scenario6BindWithProperties() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + SoftwareBitmap sb = SoftwareBitmap(BitmapPixelFormat::Bgra8, 224, 224); + auto videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); + auto imageValue = ImageFeatureValue::CreateFromVideoFrame(videoFrame); + + PropertySet propertySet; + + // make a BitmapBounds + BitmapBounds bounds; + bounds.X = 0; + bounds.Y = 0; + bounds.Height = 100; + bounds.Width = 100; + + auto bitmapsBoundsProperty = winrt::Windows::Foundation::PropertyValue::CreateUInt32Array({bounds.X, bounds.Y, bounds.Width, bounds.Height}); + // insert it in the property set + propertySet.Insert(L"BitmapBounds", bitmapsBoundsProperty); + + // make a BitmapPixelFormat + BitmapPixelFormat bitmapPixelFormat = BitmapPixelFormat::Bgra8; + // translate it to an int so it can be used as a PropertyValue; + int 
intFromBitmapPixelFormat = static_cast(bitmapPixelFormat); + auto bitmapPixelFormatProperty = winrt::Windows::Foundation::PropertyValue::CreateInt32(intFromBitmapPixelFormat); + // insert it in the property set + propertySet.Insert(L"BitmapPixelFormat", bitmapPixelFormatProperty); + + // bind with properties + WINML_EXPECT_NO_THROW(binding.Bind(input.Name(), imageValue, propertySet)); + } + // run eval + WINML_EXPECT_NO_THROW(session.Evaluate(binding, L"")); } //! Scenario7: run eval without creating a binding object -TEST_F(ScenarioCppWinrtTest, Scenario7EvalWithNoBind) -{ - auto map = winrt::single_threaded_map(); - - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // enumerate feature descriptors and create features (but don't bind them) - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input); - map.Insert(input.Name(), featureValue); - } - // run eval - EXPECT_NO_THROW(session.EvaluateFeaturesAsync(map, L"").get()); +static void ScenarioCppWinrtTest_Scenario7EvalWithNoBind() { + auto map = winrt::single_threaded_map(); + + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // enumerate feature descriptors and create features (but don't bind them) + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input); + map.Insert(input.Name(), featureValue); + } + // run eval + 
WINML_EXPECT_NO_THROW(session.EvaluateFeaturesAsync(map, L"").get()); } //! Scenario8: choose which device to run the model on - PreferredDeviceType, PreferredDevicePerformance, SetDeviceFromSurface, SetDevice // create a session on the default device -TEST_F(ScenarioCppWinrtTest, Scenario8SetDeviceSampleDefault) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - LearningModelDevice anyDevice(LearningModelDeviceKind::Default); - LearningModelSession anySession(model, anyDevice); +static void ScenarioCppWinrtTest_Scenario8SetDeviceSampleDefault() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + + LearningModelDevice anyDevice(LearningModelDeviceKind::Default); + LearningModelSession anySession(model, anyDevice); } // create a session on the CPU device -TEST_F(ScenarioCppWinrtTest, Scenario8SetDeviceSampleCPU) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - LearningModelDevice cpuDevice(LearningModelDeviceKind::Cpu); - LearningModelSession cpuSession(model, cpuDevice); +static void ScenarioCppWinrtTest_Scenario8SetDeviceSampleCPU() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + + LearningModelDevice cpuDevice(LearningModelDeviceKind::Cpu); + LearningModelSession cpuSession(model, cpuDevice); } // create a session on the default DML device -TEST_F(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleDefaultDirectX) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - LearningModelDevice 
dmlDeviceDefault(LearningModelDeviceKind::DirectX); - LearningModelSession dmlSessionDefault(model, dmlDeviceDefault); +static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleDefaultDirectX() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + + LearningModelDevice dmlDeviceDefault(LearningModelDeviceKind::DirectX); + LearningModelSession dmlSessionDefault(model, dmlDeviceDefault); } // create a session on the DML device that provides best power -TEST_F(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMinPower) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - LearningModelDevice dmlDeviceMinPower(LearningModelDeviceKind::DirectXMinPower); - LearningModelSession dmlSessionMinPower(model, dmlDeviceMinPower); +static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMinPower() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + + LearningModelDevice dmlDeviceMinPower(LearningModelDeviceKind::DirectXMinPower); + LearningModelSession dmlSessionMinPower(model, dmlDeviceMinPower); } // create a session on the DML device that provides best perf -TEST_F(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMaxPerf) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - LearningModelDevice dmlDeviceMaxPerf(LearningModelDeviceKind::DirectXHighPerformance); - LearningModelSession dmlSessionMaxPerf(model, dmlDeviceMaxPerf); +static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMaxPerf() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = 
LearningModel::LoadFromFilePath(filePath); + + LearningModelDevice dmlDeviceMaxPerf(LearningModelDeviceKind::DirectXHighPerformance); + LearningModelSession dmlSessionMaxPerf(model, dmlDeviceMaxPerf); } // create a session on the same device my camera is on -TEST_F(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMyCameraDevice) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - auto devices = winrt::Windows::Devices::Enumeration::DeviceInformation::FindAllAsync(winrt::Windows::Devices::Enumeration::DeviceClass::VideoCapture).get(); - hstring deviceId; - if (devices.Size() > 0) - { - auto device = devices.GetAt(0); - deviceId = device.Id(); - auto deviceName = device.Name(); - auto enabled = device.IsEnabled(); - std::cout << "Found device " << deviceName.c_str() << ", enabled = " << enabled << "\n"; - winrt::Windows::Media::Capture::MediaCapture captureManager; - winrt::Windows::Media::Capture::MediaCaptureInitializationSettings settings; - settings.VideoDeviceId(deviceId); - captureManager.InitializeAsync(settings).get(); - auto mediaCaptureSettings = captureManager.MediaCaptureSettings(); - auto direct3D11Device = mediaCaptureSettings.Direct3D11Device(); - LearningModelDevice dmlDeviceCamera = LearningModelDevice::CreateFromDirect3D11Device(direct3D11Device); - LearningModelSession dmlSessionCamera(model, dmlDeviceCamera); - } - else - { - GTEST_SKIP() << "Test skipped because video capture device is missing"; - } +static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMyCameraDevice() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + + auto devices = winrt::Windows::Devices::Enumeration::DeviceInformation::FindAllAsync(winrt::Windows::Devices::Enumeration::DeviceClass::VideoCapture).get(); + hstring deviceId; + if (devices.Size() > 0) { 
+ auto device = devices.GetAt(0); + deviceId = device.Id(); + auto deviceName = device.Name(); + auto enabled = device.IsEnabled(); + std::cout << "Found device " << deviceName.c_str() << ", enabled = " << enabled << "\n"; + winrt::Windows::Media::Capture::MediaCapture captureManager; + winrt::Windows::Media::Capture::MediaCaptureInitializationSettings settings; + settings.VideoDeviceId(deviceId); + captureManager.InitializeAsync(settings).get(); + auto mediaCaptureSettings = captureManager.MediaCaptureSettings(); + auto direct3D11Device = mediaCaptureSettings.Direct3D11Device(); + LearningModelDevice dmlDeviceCamera = LearningModelDevice::CreateFromDirect3D11Device(direct3D11Device); + LearningModelSession dmlSessionCamera(model, dmlDeviceCamera); + } else { + GTEST_SKIP() << "Test skipped because video capture device is missing"; + } } // create a device from D3D11 Device -TEST_F(ScenarioCppWinrtGpuSkipEdgeCoreTest, Scenario8SetDeviceSampleD3D11Device) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - com_ptr pD3D11Device = nullptr; - com_ptr pContext = nullptr; - D3D_FEATURE_LEVEL fl; - HRESULT result = D3D11CreateDevice( - nullptr, D3D_DRIVER_TYPE::D3D_DRIVER_TYPE_HARDWARE, nullptr, 0, nullptr, 0, - D3D11_SDK_VERSION, pD3D11Device.put(), &fl, pContext.put()); - if (FAILED(result)) - { - GTEST_SKIP() << "Test skipped because d3d11 device is missing"; - } +static void ScenarioCppWinrtGpuSkipEdgeCoreTest_Scenario8SetDeviceSampleD3D11Device() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); - // get dxgiDevice from d3ddevice - com_ptr pDxgiDevice; - pD3D11Device.get()->QueryInterface(pDxgiDevice.put()); + com_ptr pD3D11Device = nullptr; + com_ptr pContext = nullptr; + D3D_FEATURE_LEVEL fl; + HRESULT result = D3D11CreateDevice( + nullptr, 
D3D_DRIVER_TYPE::D3D_DRIVER_TYPE_HARDWARE, nullptr, 0, nullptr, 0, + D3D11_SDK_VERSION, pD3D11Device.put(), &fl, pContext.put()); + if (FAILED(result)) { + GTEST_SKIP() << "Test skipped because d3d11 device is missing"; + } + + // get dxgiDevice from d3ddevice + com_ptr pDxgiDevice; + pD3D11Device.get()->QueryInterface(pDxgiDevice.put()); - com_ptr<::IInspectable> pInspectable; - CreateDirect3D11DeviceFromDXGIDevice(pDxgiDevice.get(), pInspectable.put()); + com_ptr<::IInspectable> pInspectable; + CreateDirect3D11DeviceFromDXGIDevice(pDxgiDevice.get(), pInspectable.put()); - LearningModelDevice device = LearningModelDevice::CreateFromDirect3D11Device( - pInspectable.as()); - LearningModelSession session(model, device); + LearningModelDevice device = LearningModelDevice::CreateFromDirect3D11Device( + pInspectable.as()); + LearningModelSession session(model, device); } // create a session on the a specific dx device that I chose some other way , note we have to use native interop here and pass a cmd queue -TEST_F(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleCustomCommandQueue) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - com_ptr pD3D12Device = nullptr; - DeviceHelpers::AdapterEnumerationSupport support; - if (FAILED(DeviceHelpers::GetAdapterEnumerationSupport(&support))) - { - FAIL() << "Unable to load DXGI or DXCore"; - return; - } - HRESULT result = S_OK; - if (support.has_dxgi) - { - EXPECT_NO_THROW(result = D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_12_0, __uuidof(ID3D12Device), reinterpret_cast(pD3D12Device.put()))); - } +static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + + com_ptr pD3D12Device = nullptr; + 
DeviceHelpers::AdapterEnumerationSupport support; + if (FAILED(DeviceHelpers::GetAdapterEnumerationSupport(&support))) { + FAIL() << "Unable to load DXGI or DXCore"; + return; + } + HRESULT result = S_OK; + if (support.has_dxgi) { + WINML_EXPECT_NO_THROW(result = D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_12_0, __uuidof(ID3D12Device), reinterpret_cast(pD3D12Device.put()))); + } #ifdef ENABLE_DXCORE - if (support.has_dxgi == false) - { - com_ptr spFactory; - DXCoreCreateAdapterFactory(IID_PPV_ARGS(spFactory.put())); - const GUID gpuFilter[] = { DXCORE_ADAPTER_ATTRIBUTE_D3D12_GRAPHICS }; - com_ptr spAdapterList; - spFactory->CreateAdapterList(1, gpuFilter, IID_PPV_ARGS(spAdapterList.put())); - com_ptr spAdapter; - EXPECT_NO_THROW(spAdapterList->GetAdapter(0, IID_PPV_ARGS(spAdapter.put()))); - ::IUnknown* pAdapter = spAdapter.get(); - EXPECT_NO_THROW(result = D3D12CreateDevice(pAdapter, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_12_0, __uuidof(ID3D12Device), reinterpret_cast(pD3D12Device.put()))); - } + if (support.has_dxgi == false) { + com_ptr spFactory; + DXCoreCreateAdapterFactory(IID_PPV_ARGS(spFactory.put())); + const GUID gpuFilter[] = {DXCORE_ADAPTER_ATTRIBUTE_D3D12_GRAPHICS}; + com_ptr spAdapterList; + spFactory->CreateAdapterList(1, gpuFilter, IID_PPV_ARGS(spAdapterList.put())); + com_ptr spAdapter; + WINML_EXPECT_NO_THROW(spAdapterList->GetAdapter(0, IID_PPV_ARGS(spAdapter.put()))); + ::IUnknown* pAdapter = spAdapter.get(); + WINML_EXPECT_NO_THROW(result = D3D12CreateDevice(pAdapter, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_12_0, __uuidof(ID3D12Device), reinterpret_cast(pD3D12Device.put()))); + } #endif - if (FAILED(result)) - { - GTEST_SKIP() << "Test skipped because d3d12 device is missing"; - return; - } - com_ptr dxQueue = nullptr; - D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {}; - commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; - pD3D12Device->CreateCommandQueue(&commandQueueDesc, __uuidof(ID3D12CommandQueue), 
reinterpret_cast(&dxQueue)); - auto factory = get_activation_factory(); + if (FAILED(result)) { + GTEST_SKIP() << "Test skipped because d3d12 device is missing"; + return; + } + com_ptr dxQueue = nullptr; + D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {}; + commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; + pD3D12Device->CreateCommandQueue(&commandQueueDesc, __uuidof(ID3D12CommandQueue), reinterpret_cast(&dxQueue)); + auto factory = get_activation_factory(); - com_ptr<::IUnknown> spUnk; - factory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put()); + com_ptr<::IUnknown> spUnk; + factory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put()); - auto dmlDeviceCustom = spUnk.as(); - LearningModelSession dmlSessionCustom(model, dmlDeviceCustom); + auto dmlDeviceCustom = spUnk.as(); + LearningModelSession dmlSessionCustom(model, dmlDeviceCustom); } - //pass a Tensor in as an input GPU -TEST_F(ScenarioCppWinrtGpuTest, DISABLED_Scenario9LoadBindEvalInputTensorGPU) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - - com_ptr pD3D12Device; - EXPECT_NO_THROW(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), pD3D12Device.put_void())); - com_ptr dxQueue; - D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {}; - commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; - pD3D12Device->CreateCommandQueue(&commandQueueDesc, __uuidof(ID3D12CommandQueue), dxQueue.put_void()); - auto devicefactory = get_activation_factory(); - auto tensorfactory = get_activation_factory(); - - - com_ptr<::IUnknown> spUnk; - EXPECT_NO_THROW(devicefactory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put())); - - LearningModelDevice dmlDeviceCustom = nullptr; - EXPECT_NO_THROW(spUnk.as(dmlDeviceCustom)); - LearningModelSession dmlSessionCustom = nullptr; - EXPECT_NO_THROW(dmlSessionCustom = LearningModelSession(model, dmlDeviceCustom)); 
- - LearningModelBinding modelBinding(dmlSessionCustom); - - UINT64 bufferbytesize = 720 * 720 * 3 * sizeof(float); - D3D12_HEAP_PROPERTIES heapProperties = { - D3D12_HEAP_TYPE_DEFAULT, - D3D12_CPU_PAGE_PROPERTY_UNKNOWN, - D3D12_MEMORY_POOL_UNKNOWN, - 0, - 0 - }; - D3D12_RESOURCE_DESC resourceDesc = { - D3D12_RESOURCE_DIMENSION_BUFFER, - 0, - bufferbytesize, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - { 1, 0 }, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS - }; - - com_ptr pGPUResource = nullptr; - pD3D12Device->CreateCommittedResource( - &heapProperties, - D3D12_HEAP_FLAG_NONE, - &resourceDesc, - D3D12_RESOURCE_STATE_COMMON, - nullptr, - __uuidof(ID3D12Resource), - pGPUResource.put_void() - ); - com_ptr<::IUnknown> spUnkTensor; - TensorFloat input1imagetensor(nullptr); - __int64 shape[4] = { 1,3, 720, 720 }; - tensorfactory->CreateFromD3D12Resource(pGPUResource.get(), shape, 4, spUnkTensor.put()); - spUnkTensor.try_as(input1imagetensor); - - auto feature = model.InputFeatures().First(); - EXPECT_NO_THROW(modelBinding.Bind(feature.Current().Name(), input1imagetensor)); - - auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); - auto outputtensorshape = outputtensordescriptor.Shape(); - VideoFrame outputimage( - BitmapPixelFormat::Rgba8, - static_cast(outputtensorshape.GetAt(3)), - static_cast(outputtensorshape.GetAt(2))); - ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); - - EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); - - // Testing GetAsD3D12Resource - com_ptr pReturnedResource; - input1imagetensor.as()->GetD3D12Resource(pReturnedResource.put()); - EXPECT_EQ(pReturnedResource.get(), pGPUResource.get()); - - // Evaluate the model - winrt::hstring correlationId; - dmlSessionCustom.EvaluateAsync(modelBinding, correlationId).get(); +static void ScenarioCppWinrtGpuTest_DISABLED_Scenario9LoadBindEvalInputTensorGPU() { + // 
load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + com_ptr pD3D12Device; + WINML_EXPECT_NO_THROW(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), pD3D12Device.put_void())); + com_ptr dxQueue; + D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {}; + commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; + pD3D12Device->CreateCommandQueue(&commandQueueDesc, __uuidof(ID3D12CommandQueue), dxQueue.put_void()); + auto devicefactory = get_activation_factory(); + auto tensorfactory = get_activation_factory(); + + com_ptr<::IUnknown> spUnk; + WINML_EXPECT_NO_THROW(devicefactory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put())); + + LearningModelDevice dmlDeviceCustom = nullptr; + WINML_EXPECT_NO_THROW(spUnk.as(dmlDeviceCustom)); + LearningModelSession dmlSessionCustom = nullptr; + WINML_EXPECT_NO_THROW(dmlSessionCustom = LearningModelSession(model, dmlDeviceCustom)); + + LearningModelBinding modelBinding(dmlSessionCustom); + + UINT64 bufferbytesize = 720 * 720 * 3 * sizeof(float); + D3D12_HEAP_PROPERTIES heapProperties = { + D3D12_HEAP_TYPE_DEFAULT, + D3D12_CPU_PAGE_PROPERTY_UNKNOWN, + D3D12_MEMORY_POOL_UNKNOWN, + 0, + 0}; + D3D12_RESOURCE_DESC resourceDesc = { + D3D12_RESOURCE_DIMENSION_BUFFER, + 0, + bufferbytesize, + 1, + 1, + 1, + DXGI_FORMAT_UNKNOWN, + {1, 0}, + D3D12_TEXTURE_LAYOUT_ROW_MAJOR, + D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS}; + + com_ptr pGPUResource = nullptr; + pD3D12Device->CreateCommittedResource( + &heapProperties, + D3D12_HEAP_FLAG_NONE, + &resourceDesc, + D3D12_RESOURCE_STATE_COMMON, + nullptr, + __uuidof(ID3D12Resource), + pGPUResource.put_void()); + com_ptr<::IUnknown> spUnkTensor; + TensorFloat input1imagetensor(nullptr); + __int64 shape[4] = {1, 3, 720, 720}; + tensorfactory->CreateFromD3D12Resource(pGPUResource.get(), shape, 4, spUnkTensor.put()); + spUnkTensor.try_as(input1imagetensor); + 
+ auto feature = model.InputFeatures().First(); + WINML_EXPECT_NO_THROW(modelBinding.Bind(feature.Current().Name(), input1imagetensor)); + + auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); + auto outputtensorshape = outputtensordescriptor.Shape(); + VideoFrame outputimage( + BitmapPixelFormat::Rgba8, + static_cast(outputtensorshape.GetAt(3)), + static_cast(outputtensorshape.GetAt(2))); + ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); + + WINML_EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); + + // Testing GetAsD3D12Resource + com_ptr pReturnedResource; + input1imagetensor.as()->GetD3D12Resource(pReturnedResource.put()); + EXPECT_EQ(pReturnedResource.get(), pGPUResource.get()); + + // Evaluate the model + winrt::hstring correlationId; + dmlSessionCustom.EvaluateAsync(modelBinding, correlationId).get(); } -TEST_F(ScenarioCppWinrtGpuTest, Scenario13SingleModelOnCPUandGPU) -{ - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - LearningModelSession cpuSession(model, LearningModelDevice(LearningModelDeviceKind::Cpu)); - LearningModelSession gpuSession(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); - - LearningModelBinding cpuBinding(cpuSession); - LearningModelBinding gpuBinding(gpuSession); - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto cpuFeatureValue = FeatureValueFromFeatureValueDescriptor(input); - cpuBinding.Bind(input.Name(), cpuFeatureValue); - - auto gpuFeatureValue = FeatureValueFromFeatureValueDescriptor(input); - gpuBinding.Bind(input.Name(), gpuFeatureValue); - } +static void ScenarioCppWinrtGpuTest_Scenario13SingleModelOnCPUandGPU() { + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + LearningModelSession 
cpuSession(model, LearningModelDevice(LearningModelDeviceKind::Cpu)); + LearningModelSession gpuSession(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); + + LearningModelBinding cpuBinding(cpuSession); + LearningModelBinding gpuBinding(gpuSession); + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto cpuFeatureValue = FeatureValueFromFeatureValueDescriptor(input); + cpuBinding.Bind(input.Name(), cpuFeatureValue); + + auto gpuFeatureValue = FeatureValueFromFeatureValueDescriptor(input); + gpuBinding.Bind(input.Name(), gpuFeatureValue); + } - auto cpuTask = cpuSession.EvaluateAsync(cpuBinding, L"cpu"); - auto gpuTask = gpuSession.EvaluateAsync(gpuBinding, L"gpu"); + auto cpuTask = cpuSession.EvaluateAsync(cpuBinding, L"cpu"); + auto gpuTask = gpuSession.EvaluateAsync(gpuBinding, L"gpu"); - EXPECT_NO_THROW(cpuTask.get()); - EXPECT_NO_THROW(gpuTask.get()); + WINML_EXPECT_NO_THROW(cpuTask.get()); + WINML_EXPECT_NO_THROW(gpuTask.get()); } // Validates when binding input image with free dimensions, the binding step is executed correctly. 
-TEST_F(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsTensor) -{ - std::wstring filePath = FileHelpers::GetModulePath() + L"free_dimensional_image_input.onnx"; - // load a model with expected input size: -1 x -1 - auto model = LearningModel::LoadFromFilePath(filePath); - auto session = LearningModelSession(model); - auto binding = LearningModelBinding(session); +static void ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsTensor() { + std::wstring filePath = FileHelpers::GetModulePath() + L"free_dimensional_image_input.onnx"; + // load a model with expected input size: -1 x -1 + auto model = LearningModel::LoadFromFilePath(filePath); + auto session = LearningModelSession(model); + auto binding = LearningModelBinding(session); - VideoFrame inputImage(BitmapPixelFormat::Rgba8, 1000, 1000); - ImageFeatureValue inputimagetensor = ImageFeatureValue::CreateFromVideoFrame(inputImage); + VideoFrame inputImage(BitmapPixelFormat::Rgba8, 1000, 1000); + ImageFeatureValue inputimagetensor = ImageFeatureValue::CreateFromVideoFrame(inputImage); - auto feature = model.InputFeatures().First(); - binding.Bind(feature.Current().Name(), inputimagetensor); - feature.MoveNext(); - binding.Bind(feature.Current().Name(), inputimagetensor); + auto feature = model.InputFeatures().First(); + binding.Bind(feature.Current().Name(), inputimagetensor); + feature.MoveNext(); + binding.Bind(feature.Current().Name(), inputimagetensor); - session.Evaluate(binding, L""); + session.Evaluate(binding, L""); } -TEST_F(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsImage) -{ - std::wstring filePath = FileHelpers::GetModulePath() + L"free_dimensional_imageDes.onnx"; - // load a model with expected input size: -1 x -1 - auto model = LearningModel::LoadFromFilePath(filePath); - auto session = LearningModelSession(model); - auto binding = LearningModelBinding(session); +static void ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsImage() { + std::wstring filePath = FileHelpers::GetModulePath() + 
L"free_dimensional_imageDes.onnx"; + // load a model with expected input size: -1 x -1 + auto model = LearningModel::LoadFromFilePath(filePath); + auto session = LearningModelSession(model); + auto binding = LearningModelBinding(session); - VideoFrame inputImage(BitmapPixelFormat::Bgra8, 1000, 1000); - ImageFeatureValue inputimagetensor = ImageFeatureValue::CreateFromVideoFrame(inputImage); + VideoFrame inputImage(BitmapPixelFormat::Bgra8, 1000, 1000); + ImageFeatureValue inputimagetensor = ImageFeatureValue::CreateFromVideoFrame(inputImage); - auto feature = model.InputFeatures().First(); - ImageFeatureDescriptor imageDescriptor = nullptr; - feature.Current().as(imageDescriptor); - binding.Bind(feature.Current().Name(), inputimagetensor); + auto feature = model.InputFeatures().First(); + ImageFeatureDescriptor imageDescriptor = nullptr; + feature.Current().as(imageDescriptor); + binding.Bind(feature.Current().Name(), inputimagetensor); - feature.MoveNext(); - feature.Current().as(imageDescriptor); - binding.Bind(feature.Current().Name(), inputimagetensor); + feature.MoveNext(); + feature.Current().as(imageDescriptor); + binding.Bind(feature.Current().Name(), inputimagetensor); - session.Evaluate(binding, L""); + session.Evaluate(binding, L""); } -struct SwapChainEntry -{ - LearningModelSession session; - LearningModelBinding binding; - winrt::Windows::Foundation::IAsyncOperation activetask; - SwapChainEntry() :session(nullptr), binding(nullptr), activetask(nullptr) - {} +struct SwapChainEntry { + LearningModelSession session; + LearningModelBinding binding; + winrt::Windows::Foundation::IAsyncOperation activetask; + SwapChainEntry() : session(nullptr), binding(nullptr), activetask(nullptr) {} }; -void SubmitEval(LearningModel model, SwapChainEntry *sessionBindings, int swapchaindex) -{ - if (sessionBindings[swapchaindex].activetask != nullptr) - { - //make sure the previously submitted work for this swapchain index is complete before reusing resources - 
sessionBindings[swapchaindex].activetask.get(); - } - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input); - // set an actual buffer here. we're using uninitialized data for simplicity. - sessionBindings[swapchaindex].binding.Bind(input.Name(), featureValue); - } - // submit an eval and wait for it to finish submitting work - sessionBindings[swapchaindex].activetask = sessionBindings[swapchaindex].session.EvaluateAsync(sessionBindings[swapchaindex].binding, L"0"); - // return without waiting for the submit to finish, setup the completion handler +void SubmitEval(LearningModel model, SwapChainEntry* sessionBindings, int swapchaindex) { + if (sessionBindings[swapchaindex].activetask != nullptr) { + //make sure the previously submitted work for this swapchain index is complete before reusing resources + sessionBindings[swapchaindex].activetask.get(); + } + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input); + // set an actual buffer here. we're using uninitialized data for simplicity. 
+ sessionBindings[swapchaindex].binding.Bind(input.Name(), featureValue); + } + // submit an eval and wait for it to finish submitting work + sessionBindings[swapchaindex].activetask = sessionBindings[swapchaindex].session.EvaluateAsync(sessionBindings[swapchaindex].binding, L"0"); + // return without waiting for the submit to finish, setup the completion handler } //Scenario14:Load single model, run it mutliple times on a single gpu device using a fast swapchain pattern -TEST_F(ScenarioCppWinrtGpuTest, Scenario14RunModelSwapchain) -{ - const int swapchainentrycount = 3; - SwapChainEntry sessionBindings[swapchainentrycount]; - - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on gpu1 - LearningModelDevice dmlDevice = LearningModelDevice(LearningModelDeviceKind::DirectX); - // create the swapchain style bindings to cycle through - for (int i = 0; i < swapchainentrycount; i++) - { - sessionBindings[i].session = LearningModelSession(model, dmlDevice); - sessionBindings[i].binding = LearningModelBinding(sessionBindings[i].session); - } +static void ScenarioCppWinrtGpuTest_Scenario14RunModelSwapchain() { + const int swapchainentrycount = 3; + SwapChainEntry sessionBindings[swapchainentrycount]; - //submit 10 evaluations to 3 swapchain entries - int swapchaindex = 0; - for (int i = 0; i < 10; i++) - { - swapchaindex = swapchaindex % swapchainentrycount; - SubmitEval(model, sessionBindings, (swapchaindex)++); - } + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on gpu1 + LearningModelDevice dmlDevice = LearningModelDevice(LearningModelDeviceKind::DirectX); + // create the swapchain style bindings to cycle through + for (int i = 0; i < swapchainentrycount; i++) { + sessionBindings[i].session = 
LearningModelSession(model, dmlDevice); + sessionBindings[i].binding = LearningModelBinding(sessionBindings[i].session); + } - //wait for all work to be completed - for (int i = 0; i < swapchainentrycount; i++) - { - if (sessionBindings[i].activetask != nullptr) - { - //make sure the previously submitted work for this swapchain index is compolete before resuing resources - sessionBindings[i].activetask.get(); - } + //submit 10 evaluations to 3 swapchain entries + int swapchaindex = 0; + for (int i = 0; i < 10; i++) { + swapchaindex = swapchaindex % swapchainentrycount; + SubmitEval(model, sessionBindings, (swapchaindex)++); + } + + //wait for all work to be completed + for (int i = 0; i < swapchainentrycount; i++) { + if (sessionBindings[i].activetask != nullptr) { + //make sure the previously submitted work for this swapchain index is compolete before resuing resources + sessionBindings[i].activetask.get(); } + } } -static void LoadBindEval_CustomOperator_CPU(const wchar_t* fileName) -{ - auto customOperatorProvider = winrt::make(); - auto provider = customOperatorProvider.as(); - - LearningModel model = LearningModel::LoadFromFilePath(fileName, provider); - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - LearningModelBinding bindings(session); - - auto inputShape = std::vector{ 5 }; - auto inputData = std::vector{ -50.f, -25.f, 0.f, 25.f, 50.f }; - auto inputValue = - TensorFloat::CreateFromIterable( - inputShape, - single_threaded_vector(std::move(inputData)).GetView()); - EXPECT_NO_THROW(bindings.Bind(L"X", inputValue)); - - auto outputValue = TensorFloat::Create(); - EXPECT_NO_THROW(bindings.Bind(L"Y", outputValue)); - - hstring correlationId; - EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); - - auto buffer = outputValue.GetAsVectorView(); - EXPECT_TRUE(buffer != nullptr); +static void LoadBindEval_CustomOperator_CPU(const wchar_t* fileName) { + auto customOperatorProvider = winrt::make(); + auto 
provider = customOperatorProvider.as(); + + LearningModel model = LearningModel::LoadFromFilePath(fileName, provider); + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + LearningModelBinding bindings(session); + + auto inputShape = std::vector{5}; + auto inputData = std::vector{-50.f, -25.f, 0.f, 25.f, 50.f}; + auto inputValue = + TensorFloat::CreateFromIterable( + inputShape, + single_threaded_vector(std::move(inputData)).GetView()); + WINML_EXPECT_NO_THROW(bindings.Bind(L"X", inputValue)); + + auto outputValue = TensorFloat::Create(); + WINML_EXPECT_NO_THROW(bindings.Bind(L"Y", outputValue)); + + hstring correlationId; + WINML_EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); + + auto buffer = outputValue.GetAsVectorView(); + EXPECT_TRUE(buffer != nullptr); } //! Scenario17 : Control the dev diagnostics features of WinML Tracing -TEST_F(ScenarioCppWinrtTest, Scenario17DevDiagnostics) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input); - // set an actual buffer here. we're using uninitialized data for simplicity. 
- binding.Bind(input.Name(), featureValue); - } - session.EvaluationProperties().Insert(L"EnableDebugOutput", nullptr); - // run eval - EXPECT_NO_THROW(session.Evaluate(binding, L"")); +static void ScenarioCppWinrtTest_Scenario17DevDiagnostics() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input); + // set an actual buffer here. we're using uninitialized data for simplicity. + binding.Bind(input.Name(), featureValue); + } + session.EvaluationProperties().Insert(L"EnableDebugOutput", nullptr); + // run eval + WINML_EXPECT_NO_THROW(session.Evaluate(binding, L"")); } /** * Custom Operator Tests are labeled as GPU tests because the DML code is interlaced with the custom op code * even though CPU custom ops shouldn't be dependent on GPU functionality. - * These should be reclassed to ScenarioCppWinrtTest once the DML code is decoupled from the custom op code. -**/ + * These should be reclassed to ScenarioCppWinrt once the DML code is decoupled from the custom op code. 
+**/ // create a session that loads a model with a branch new operator, register the custom operator, and load/bind/eval -TEST_F(ScenarioCppWinrtGpuTest, Scenario20aLoadBindEvalCustomOperatorCPU) -{ - std::wstring filePath = FileHelpers::GetModulePath() + L"noisy_relu.onnx"; - LoadBindEval_CustomOperator_CPU(filePath.c_str()); +static void ScenarioCppWinrtGpuTest_Scenario20aLoadBindEvalCustomOperatorCPU() { + std::wstring filePath = FileHelpers::GetModulePath() + L"noisy_relu.onnx"; + LoadBindEval_CustomOperator_CPU(filePath.c_str()); } // create a session that loads a model with an overridden operator, register the replacement custom operator, and load/bind/eval -TEST_F(ScenarioCppWinrtGpuTest, Scenario20bLoadBindEvalReplacementCustomOperatorCPU) -{ - std::wstring filePath = FileHelpers::GetModulePath() + L"relu.onnx"; - LoadBindEval_CustomOperator_CPU(filePath.c_str()); +static void ScenarioCppWinrtGpuTest_Scenario20bLoadBindEvalReplacementCustomOperatorCPU() { + std::wstring filePath = FileHelpers::GetModulePath() + L"relu.onnx"; + LoadBindEval_CustomOperator_CPU(filePath.c_str()); } //! 
Scenario21: Load two models, set them up to run chained after one another on the same gpu hardware device -TEST_F(ScenarioCppWinrtGpuTest, DISABLED_Scenario21RunModel2ChainZ) -{ - // load a model, TODO: get a model that has an image descriptor - std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create both session on the default gpu - LearningModelSession session1(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); - LearningModelSession session2(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); - // create both binding sets - LearningModelBinding binding1(session1); - LearningModelBinding binding2(session2); - // get the input descriptor - auto input = model.InputFeatures().GetAt(0); - // load a SoftwareBitmap - auto sb = FileHelpers::GetSoftwareBitmapFromFile(FileHelpers::GetModulePath() + L"fish_720.png"); - auto videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); - // bind it - binding1.Bind(input.Name(), videoFrame); - // get the output descriptor - auto output = model.OutputFeatures().GetAt(0); - // create an empty output tensor since we don't want the first model to detensorize into an image. 
- - std::vector shape = { 1, 3, 720, 720 }; - auto outputValue = TensorFloat::Create(shape); // FeatureValueFromFeatureValueDescriptor(input, nullptr); - // now bind the(empty) output so we have a marker to chain with - binding1.Bind(output.Name(), outputValue); - // and leave the output unbound on the second model, we will fetch it later - // run both models async - EXPECT_NO_THROW(session1.EvaluateAsync(binding1, L"")); - - // now bind that output to the next models input - binding2.Bind(input.Name(), outputValue); - - //eval the second model - auto session2AsyncOp = session2.EvaluateAsync(binding2, L""); - - // now get the output don't wait, queue up the next model - auto finalOutput = session2AsyncOp.get().Outputs().First().Current().Value(); +static void ScenarioCppWinrtGpuTest_DISABLED_Scenario21RunModel2ChainZ() { + // load a model, TODO: get a model that has an image descriptor + std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create both session on the default gpu + LearningModelSession session1(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); + LearningModelSession session2(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); + // create both binding sets + LearningModelBinding binding1(session1); + LearningModelBinding binding2(session2); + // get the input descriptor + auto input = model.InputFeatures().GetAt(0); + // load a SoftwareBitmap + auto sb = FileHelpers::GetSoftwareBitmapFromFile(FileHelpers::GetModulePath() + L"fish_720.png"); + auto videoFrame = VideoFrame::CreateWithSoftwareBitmap(sb); + // bind it + binding1.Bind(input.Name(), videoFrame); + // get the output descriptor + auto output = model.OutputFeatures().GetAt(0); + // create an empty output tensor since we don't want the first model to detensorize into an image. 
+ + std::vector shape = {1, 3, 720, 720}; + auto outputValue = TensorFloat::Create(shape); // FeatureValueFromFeatureValueDescriptor(input, nullptr); + // now bind the(empty) output so we have a marker to chain with + binding1.Bind(output.Name(), outputValue); + // and leave the output unbound on the second model, we will fetch it later + // run both models async + WINML_EXPECT_NO_THROW(session1.EvaluateAsync(binding1, L"")); + + // now bind that output to the next models input + binding2.Bind(input.Name(), outputValue); + + //eval the second model + auto session2AsyncOp = session2.EvaluateAsync(binding2, L""); + + // now get the output don't wait, queue up the next model + auto finalOutput = session2AsyncOp.get().Outputs().First().Current().Value(); } -bool VerifyHelper(ImageFeatureValue actual, ImageFeatureValue expected) -{ - auto softwareBitmapActual = actual.VideoFrame().SoftwareBitmap(); - auto softwareBitmapExpected = expected.VideoFrame().SoftwareBitmap(); - EXPECT_EQ(softwareBitmapActual.PixelHeight(), softwareBitmapExpected.PixelHeight()); - EXPECT_EQ(softwareBitmapActual.PixelWidth(), softwareBitmapExpected.PixelWidth()); - EXPECT_EQ(softwareBitmapActual.BitmapPixelFormat(), softwareBitmapExpected.BitmapPixelFormat()); - - // 4 means 4 channels - uint32_t size = 4 * softwareBitmapActual.PixelHeight() * softwareBitmapActual.PixelWidth(); - - winrt::Windows::Storage::Streams::Buffer actualOutputBuffer(size); - winrt::Windows::Storage::Streams::Buffer expectedOutputBuffer(size); - - softwareBitmapActual.CopyToBuffer(actualOutputBuffer); - softwareBitmapExpected.CopyToBuffer(expectedOutputBuffer); - - byte* actualBytes; - actualOutputBuffer.try_as<::Windows::Storage::Streams::IBufferByteAccess>()->Buffer(&actualBytes); - byte* expectedBytes; - expectedOutputBuffer.try_as<::Windows::Storage::Streams::IBufferByteAccess>()->Buffer(&expectedBytes); - - byte* pActualByte = actualBytes; - byte* pExpectedByte = expectedBytes; - - // hard code, might need to be 
modified later. - const float cMaxErrorRate = 0.06f; - byte epsilon = 20; - - UINT errors = 0; - for (uint32_t i = 0; i < size; i++, pActualByte++, pExpectedByte++) - { - auto diff = (*pActualByte - *pExpectedByte); - if (diff > epsilon) - { - errors++; - } - } - std::cout << "total errors is " << errors << "/" << size << ", errors rate is " << (float)errors / size << "\n"; +bool VerifyHelper(ImageFeatureValue actual, ImageFeatureValue expected) { + auto softwareBitmapActual = actual.VideoFrame().SoftwareBitmap(); + auto softwareBitmapExpected = expected.VideoFrame().SoftwareBitmap(); + EXPECT_EQ(softwareBitmapActual.PixelHeight(), softwareBitmapExpected.PixelHeight()); + EXPECT_EQ(softwareBitmapActual.PixelWidth(), softwareBitmapExpected.PixelWidth()); + EXPECT_EQ(softwareBitmapActual.BitmapPixelFormat(), softwareBitmapExpected.BitmapPixelFormat()); - return ((float)errors / size < cMaxErrorRate); -} + // 4 means 4 channels + uint32_t size = 4 * softwareBitmapActual.PixelHeight() * softwareBitmapActual.PixelWidth(); -TEST_F(ScenarioCppWinrtTest, DISABLED_Scenario22ImageBindingAsCPUTensor) -{ - std::wstring modulePath = FileHelpers::GetModulePath(); - std::wstring inputImagePath = modulePath + L"fish_720.png"; - std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; - std::wstring modelPath = modulePath + L"fns-candy.onnx"; + winrt::Windows::Storage::Streams::Buffer actualOutputBuffer(size); + winrt::Windows::Storage::Streams::Buffer expectedOutputBuffer(size); - auto device = LearningModelDevice(LearningModelDeviceKind::Default); - auto model = LearningModel::LoadFromFilePath(modelPath); - auto session = LearningModelSession(model, device); - auto binding = LearningModelBinding(session); + softwareBitmapActual.CopyToBuffer(actualOutputBuffer); + softwareBitmapExpected.CopyToBuffer(expectedOutputBuffer); - SoftwareBitmap softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(inputImagePath); - softwareBitmap = SoftwareBitmap::Convert(softwareBitmap, 
BitmapPixelFormat::Bgra8); - - // Put softwareBitmap into buffer - BYTE* pData = nullptr; - UINT32 size = 0; - winrt::Windows::Graphics::Imaging::BitmapBuffer spBitmapBuffer(softwareBitmap.LockBuffer(winrt::Windows::Graphics::Imaging::BitmapBufferAccessMode::Read)); - winrt::Windows::Foundation::IMemoryBufferReference reference = spBitmapBuffer.CreateReference(); - auto spByteAccess = reference.as<::Windows::Foundation::IMemoryBufferByteAccess>(); - spByteAccess->GetBuffer(&pData, &size); - - std::vector shape = { 1, 3, softwareBitmap.PixelHeight() , softwareBitmap.PixelWidth() }; - float* pCPUTensor; - uint32_t uCapacity; - TensorFloat tf = TensorFloat::Create(shape); - com_ptr itn = tf.as(); - itn->GetBuffer(reinterpret_cast(&pCPUTensor), &uCapacity); - - uint32_t height = softwareBitmap.PixelHeight(); - uint32_t width = softwareBitmap.PixelWidth(); - for (UINT32 i = 0; i < size; i += 4) - { - UINT32 pixelInd = i / 4; - pCPUTensor[pixelInd] = (float)pData[i]; - pCPUTensor[(height * width) + pixelInd] = (float)pData[i + 1]; - pCPUTensor[(height * width * 2) + pixelInd] = (float)pData[i + 2]; - } + byte* actualBytes; + actualOutputBuffer.try_as<::Windows::Storage::Streams::IBufferByteAccess>()->Buffer(&actualBytes); + byte* expectedBytes; + expectedOutputBuffer.try_as<::Windows::Storage::Streams::IBufferByteAccess>()->Buffer(&expectedBytes); - // Bind input - binding.Bind(model.InputFeatures().First().Current().Name(), tf); - - // Bind output - auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); - auto outputtensorshape = outputtensordescriptor.Shape(); - VideoFrame outputimage( - BitmapPixelFormat::Bgra8, - static_cast(outputtensorshape.GetAt(3)), - static_cast(outputtensorshape.GetAt(2))); - ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); - EXPECT_NO_THROW(binding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); - - // Evaluate the model - winrt::hstring correlationId; - 
EXPECT_NO_THROW(session.EvaluateAsync(binding, correlationId).get()); - - // Verify the output by comparing with the benchmark image - SoftwareBitmap bm_softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(bmImagePath); - bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Bgra8); - VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); - ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); - EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); - - // check the output video frame object by saving output image to disk - std::wstring outputDataImageFileName = L"out_cpu_tensor_fish_720.jpg"; - StorageFolder currentfolder = StorageFolder::GetFolderFromPathAsync(modulePath).get(); - StorageFile outimagefile = currentfolder.CreateFileAsync(outputDataImageFileName, CreationCollisionOption::ReplaceExisting).get(); - IRandomAccessStream writestream = outimagefile.OpenAsync(FileAccessMode::ReadWrite).get(); - BitmapEncoder encoder = BitmapEncoder::CreateAsync(BitmapEncoder::JpegEncoderId(), writestream).get(); - // Set the software bitmap - encoder.SetSoftwareBitmap(outputimage.SoftwareBitmap()); - encoder.FlushAsync().get(); -} + byte* pActualByte = actualBytes; + byte* pExpectedByte = expectedBytes; -TEST_F(ScenarioCppWinrtGpuTest, DISABLED_Scenario22ImageBindingAsGPUTensor) -{ - std::wstring modulePath = FileHelpers::GetModulePath(); - std::wstring inputImagePath = modulePath + L"fish_720.png"; - std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; - std::wstring modelPath = modulePath + L"fns-candy.onnx"; - std::wstring outputDataImageFileName = L"out_gpu_tensor_fish_720.jpg"; - - SoftwareBitmap softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(inputImagePath); - softwareBitmap = SoftwareBitmap::Convert(softwareBitmap, BitmapPixelFormat::Bgra8); - - // Put softwareBitmap into cpu buffer - BYTE* pData = nullptr; - UINT32 size = 0; - 
winrt::Windows::Graphics::Imaging::BitmapBuffer spBitmapBuffer(softwareBitmap.LockBuffer(winrt::Windows::Graphics::Imaging::BitmapBufferAccessMode::Read)); - winrt::Windows::Foundation::IMemoryBufferReference reference = spBitmapBuffer.CreateReference(); - auto spByteAccess = reference.as<::Windows::Foundation::IMemoryBufferByteAccess>(); - spByteAccess->GetBuffer(&pData, &size); - - std::vector shape = { 1, 3, softwareBitmap.PixelHeight() , softwareBitmap.PixelWidth() }; - FLOAT* pCPUTensor; - uint32_t uCapacity; - - // CPU tensorization - TensorFloat tf = TensorFloat::Create(shape); - com_ptr itn = tf.as(); - itn->GetBuffer(reinterpret_cast(&pCPUTensor), &uCapacity); - - uint32_t height = softwareBitmap.PixelHeight(); - uint32_t width = softwareBitmap.PixelWidth(); - for (UINT32 i = 0; i < size; i += 4) - { - UINT32 pixelInd = i / 4; - pCPUTensor[pixelInd] = (FLOAT)pData[i]; - pCPUTensor[(height * width) + pixelInd] = (FLOAT)pData[i + 1]; - pCPUTensor[(height * width * 2) + pixelInd] = (FLOAT)pData[i + 2]; + // hard code, might need to be modified later. + const float cMaxErrorRate = 0.06f; + byte epsilon = 20; + + UINT errors = 0; + for (uint32_t i = 0; i < size; i++, pActualByte++, pExpectedByte++) { + auto diff = (*pActualByte - *pExpectedByte); + if (diff > epsilon) { + errors++; } + } + std::cout << "total errors is " << errors << "/" << size << ", errors rate is " << (float)errors / size << "\n"; - // create the d3d device. - com_ptr pD3D12Device = nullptr; - EXPECT_NO_THROW(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), reinterpret_cast(&pD3D12Device))); - - // create the command queue. 
- com_ptr dxQueue = nullptr; - D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {}; - commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; - pD3D12Device->CreateCommandQueue(&commandQueueDesc, __uuidof(ID3D12CommandQueue), reinterpret_cast(&dxQueue)); - auto devicefactory = get_activation_factory(); - auto tensorfactory = get_activation_factory(); - com_ptr<::IUnknown> spUnk; - devicefactory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put()); - - LearningModel model(nullptr); - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(modelPath)); - LearningModelDevice dmlDeviceCustom = nullptr; - EXPECT_NO_THROW(spUnk.as(dmlDeviceCustom)); - LearningModelSession dmlSessionCustom = nullptr; - EXPECT_NO_THROW(dmlSessionCustom = LearningModelSession(model, dmlDeviceCustom)); - LearningModelBinding modelBinding = nullptr; - EXPECT_NO_THROW(modelBinding = LearningModelBinding(dmlSessionCustom)); - - // Create ID3D12GraphicsCommandList and Allocator - D3D12_COMMAND_LIST_TYPE queuetype = dxQueue->GetDesc().Type; - com_ptr alloctor; - com_ptr cmdList; - - pD3D12Device->CreateCommandAllocator( - queuetype, - winrt::guid_of(), - alloctor.put_void()); - - pD3D12Device->CreateCommandList( - 0, - queuetype, - alloctor.get(), - nullptr, - winrt::guid_of(), - cmdList.put_void()); - - // Create Committed Resource - // 3 is number of channels we use. R G B without alpha. 
- UINT64 bufferbytesize = 3 * sizeof(float) * softwareBitmap.PixelWidth()*softwareBitmap.PixelHeight(); - D3D12_HEAP_PROPERTIES heapProperties = { - D3D12_HEAP_TYPE_DEFAULT, - D3D12_CPU_PAGE_PROPERTY_UNKNOWN, - D3D12_MEMORY_POOL_UNKNOWN, - 0, - 0 - }; - D3D12_RESOURCE_DESC resourceDesc = { - D3D12_RESOURCE_DIMENSION_BUFFER, - 0, - bufferbytesize, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - { 1, 0 }, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS - }; - - com_ptr pGPUResource = nullptr; - com_ptr imageUploadHeap; - pD3D12Device->CreateCommittedResource( - &heapProperties, - D3D12_HEAP_FLAG_NONE, - &resourceDesc, - D3D12_RESOURCE_STATE_COMMON, - nullptr, - __uuidof(ID3D12Resource), - pGPUResource.put_void() - ); - - // Create the GPU upload buffer. - CD3DX12_HEAP_PROPERTIES props(D3D12_HEAP_TYPE_UPLOAD); - auto buffer = CD3DX12_RESOURCE_DESC::Buffer(bufferbytesize); - EXPECT_NO_THROW(pD3D12Device->CreateCommittedResource( - &props, - D3D12_HEAP_FLAG_NONE, - &buffer, - D3D12_RESOURCE_STATE_GENERIC_READ, - nullptr, - __uuidof(ID3D12Resource), - imageUploadHeap.put_void())); - - // Copy from Cpu to GPU - D3D12_SUBRESOURCE_DATA CPUData = {}; - CPUData.pData = reinterpret_cast(pCPUTensor); - CPUData.RowPitch = bufferbytesize; - CPUData.SlicePitch = bufferbytesize; - UpdateSubresources(cmdList.get(), pGPUResource.get(), imageUploadHeap.get(), 0, 0, 1, &CPUData); - - // Close the command list and execute it to begin the initial GPU setup. 
- EXPECT_NO_THROW(cmdList->Close()); - ID3D12CommandList* ppCommandLists[] = { cmdList.get() }; - dxQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists); - - // GPU tensorize - com_ptr<::IUnknown> spUnkTensor; - TensorFloat input1imagetensor(nullptr); - __int64 shapes[4] = { 1,3, softwareBitmap.PixelWidth(), softwareBitmap.PixelHeight() }; - tensorfactory->CreateFromD3D12Resource(pGPUResource.get(), shapes, 4, spUnkTensor.put()); - spUnkTensor.try_as(input1imagetensor); - - auto feature = model.InputFeatures().First(); - EXPECT_NO_THROW(modelBinding.Bind(feature.Current().Name(), input1imagetensor)); - - auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); - auto outputtensorshape = outputtensordescriptor.Shape(); - VideoFrame outputimage( - BitmapPixelFormat::Rgba8, - static_cast(outputtensorshape.GetAt(3)), - static_cast(outputtensorshape.GetAt(2))); - ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); - - EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); - - // Evaluate the model - winrt::hstring correlationId; - dmlSessionCustom.EvaluateAsync(modelBinding, correlationId).get(); - - // Verify the output by comparing with the benchmark image - SoftwareBitmap bm_softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(bmImagePath); - bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Rgba8); - VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); - ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); - EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); - - - //check the output video frame object - StorageFolder currentfolder = StorageFolder::GetFolderFromPathAsync(modulePath).get(); - StorageFile outimagefile = currentfolder.CreateFileAsync(outputDataImageFileName, CreationCollisionOption::ReplaceExisting).get(); - IRandomAccessStream 
writestream = outimagefile.OpenAsync(FileAccessMode::ReadWrite).get(); - BitmapEncoder encoder = BitmapEncoder::CreateAsync(BitmapEncoder::JpegEncoderId(), writestream).get(); - // Set the software bitmap - encoder.SetSoftwareBitmap(outputimage.SoftwareBitmap()); - encoder.FlushAsync().get(); + return ((float)errors / size < cMaxErrorRate); } -TEST_F(ScenarioCppWinrtTest, QuantizedModels) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + L"onnxzoo_lotus_inception_v1-dq.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a session on the default device - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input); - // set an actual buffer here. we're using uninitialized data for simplicity. 
- binding.Bind(input.Name(), featureValue); - } - // run eval - EXPECT_NO_THROW(session.Evaluate(binding, filePath)); +static void ScenarioCppWinrtTest_DISABLED_Scenario22ImageBindingAsCPUTensor() { + std::wstring modulePath = FileHelpers::GetModulePath(); + std::wstring inputImagePath = modulePath + L"fish_720.png"; + std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; + std::wstring modelPath = modulePath + L"fns-candy.onnx"; + + auto device = LearningModelDevice(LearningModelDeviceKind::Default); + auto model = LearningModel::LoadFromFilePath(modelPath); + auto session = LearningModelSession(model, device); + auto binding = LearningModelBinding(session); + + SoftwareBitmap softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(inputImagePath); + softwareBitmap = SoftwareBitmap::Convert(softwareBitmap, BitmapPixelFormat::Bgra8); + + // Put softwareBitmap into buffer + BYTE* pData = nullptr; + UINT32 size = 0; + winrt::Windows::Graphics::Imaging::BitmapBuffer spBitmapBuffer(softwareBitmap.LockBuffer(winrt::Windows::Graphics::Imaging::BitmapBufferAccessMode::Read)); + winrt::Windows::Foundation::IMemoryBufferReference reference = spBitmapBuffer.CreateReference(); + auto spByteAccess = reference.as<::Windows::Foundation::IMemoryBufferByteAccess>(); + spByteAccess->GetBuffer(&pData, &size); + + std::vector shape = {1, 3, softwareBitmap.PixelHeight(), softwareBitmap.PixelWidth()}; + float* pCPUTensor; + uint32_t uCapacity; + TensorFloat tf = TensorFloat::Create(shape); + com_ptr itn = tf.as(); + itn->GetBuffer(reinterpret_cast(&pCPUTensor), &uCapacity); + + uint32_t height = softwareBitmap.PixelHeight(); + uint32_t width = softwareBitmap.PixelWidth(); + for (UINT32 i = 0; i < size; i += 4) { + UINT32 pixelInd = i / 4; + pCPUTensor[pixelInd] = (float)pData[i]; + pCPUTensor[(height * width) + pixelInd] = (float)pData[i + 1]; + pCPUTensor[(height * width * 2) + pixelInd] = (float)pData[i + 2]; + } + + // Bind input + 
binding.Bind(model.InputFeatures().First().Current().Name(), tf); + + // Bind output + auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); + auto outputtensorshape = outputtensordescriptor.Shape(); + VideoFrame outputimage( + BitmapPixelFormat::Bgra8, + static_cast(outputtensorshape.GetAt(3)), + static_cast(outputtensorshape.GetAt(2))); + ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); + WINML_EXPECT_NO_THROW(binding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); + + // Evaluate the model + winrt::hstring correlationId; + WINML_EXPECT_NO_THROW(session.EvaluateAsync(binding, correlationId).get()); + + // Verify the output by comparing with the benchmark image + SoftwareBitmap bm_softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(bmImagePath); + bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Bgra8); + VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); + ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); + EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); + + // check the output video frame object by saving output image to disk + std::wstring outputDataImageFileName = L"out_cpu_tensor_fish_720.jpg"; + StorageFolder currentfolder = StorageFolder::GetFolderFromPathAsync(modulePath).get(); + StorageFile outimagefile = currentfolder.CreateFileAsync(outputDataImageFileName, CreationCollisionOption::ReplaceExisting).get(); + IRandomAccessStream writestream = outimagefile.OpenAsync(FileAccessMode::ReadWrite).get(); + BitmapEncoder encoder = BitmapEncoder::CreateAsync(BitmapEncoder::JpegEncoderId(), writestream).get(); + // Set the software bitmap + encoder.SetSoftwareBitmap(outputimage.SoftwareBitmap()); + encoder.FlushAsync().get(); } -TEST_F(ScenarioCppWinrtGpuTest, MsftQuantizedModels) -{ - // load a model - std::wstring filePath = FileHelpers::GetModulePath() + 
L"coreml_Resnet50_ImageNet-dq.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); - // create a binding set - LearningModelBinding binding(session); - // bind the input and the output buffers by name - - std::wstring fullImagePath = FileHelpers::GetModulePath() + L"kitten_224.png"; - StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); - IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); - SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); - - auto inputs = model.InputFeatures(); - for (auto&& input : inputs) - { - auto featureValue = FeatureValueFromFeatureValueDescriptor(input, softwareBitmap); - // set an actual buffer here. we're using uninitialized data for simplicity. - binding.Bind(input.Name(), featureValue); - } - // run eval - EXPECT_NO_THROW(session.Evaluate(binding, filePath)); +static void ScenarioCppWinrtGpuTest_DISABLED_Scenario22ImageBindingAsGPUTensor() { + std::wstring modulePath = FileHelpers::GetModulePath(); + std::wstring inputImagePath = modulePath + L"fish_720.png"; + std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; + std::wstring modelPath = modulePath + L"fns-candy.onnx"; + std::wstring outputDataImageFileName = L"out_gpu_tensor_fish_720.jpg"; + + SoftwareBitmap softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(inputImagePath); + softwareBitmap = SoftwareBitmap::Convert(softwareBitmap, BitmapPixelFormat::Bgra8); + + // Put softwareBitmap into cpu buffer + BYTE* pData = nullptr; + UINT32 size = 0; + winrt::Windows::Graphics::Imaging::BitmapBuffer spBitmapBuffer(softwareBitmap.LockBuffer(winrt::Windows::Graphics::Imaging::BitmapBufferAccessMode::Read)); + winrt::Windows::Foundation::IMemoryBufferReference reference = spBitmapBuffer.CreateReference(); + auto spByteAccess = 
reference.as<::Windows::Foundation::IMemoryBufferByteAccess>(); + spByteAccess->GetBuffer(&pData, &size); + + std::vector shape = {1, 3, softwareBitmap.PixelHeight(), softwareBitmap.PixelWidth()}; + FLOAT* pCPUTensor; + uint32_t uCapacity; + + // CPU tensorization + TensorFloat tf = TensorFloat::Create(shape); + com_ptr itn = tf.as(); + itn->GetBuffer(reinterpret_cast(&pCPUTensor), &uCapacity); + + uint32_t height = softwareBitmap.PixelHeight(); + uint32_t width = softwareBitmap.PixelWidth(); + for (UINT32 i = 0; i < size; i += 4) { + UINT32 pixelInd = i / 4; + pCPUTensor[pixelInd] = (FLOAT)pData[i]; + pCPUTensor[(height * width) + pixelInd] = (FLOAT)pData[i + 1]; + pCPUTensor[(height * width * 2) + pixelInd] = (FLOAT)pData[i + 2]; + } + + // create the d3d device. + com_ptr pD3D12Device = nullptr; + WINML_EXPECT_NO_THROW(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL::D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), reinterpret_cast(&pD3D12Device))); + + // create the command queue. + com_ptr dxQueue = nullptr; + D3D12_COMMAND_QUEUE_DESC commandQueueDesc = {}; + commandQueueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; + pD3D12Device->CreateCommandQueue(&commandQueueDesc, __uuidof(ID3D12CommandQueue), reinterpret_cast(&dxQueue)); + auto devicefactory = get_activation_factory(); + auto tensorfactory = get_activation_factory(); + com_ptr<::IUnknown> spUnk; + devicefactory->CreateFromD3D12CommandQueue(dxQueue.get(), spUnk.put()); + + LearningModel model(nullptr); + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(modelPath)); + LearningModelDevice dmlDeviceCustom = nullptr; + WINML_EXPECT_NO_THROW(spUnk.as(dmlDeviceCustom)); + LearningModelSession dmlSessionCustom = nullptr; + WINML_EXPECT_NO_THROW(dmlSessionCustom = LearningModelSession(model, dmlDeviceCustom)); + LearningModelBinding modelBinding = nullptr; + WINML_EXPECT_NO_THROW(modelBinding = LearningModelBinding(dmlSessionCustom)); + + // Create ID3D12GraphicsCommandList and Allocator + 
D3D12_COMMAND_LIST_TYPE queuetype = dxQueue->GetDesc().Type; + com_ptr alloctor; + com_ptr cmdList; + + pD3D12Device->CreateCommandAllocator( + queuetype, + winrt::guid_of(), + alloctor.put_void()); + + pD3D12Device->CreateCommandList( + 0, + queuetype, + alloctor.get(), + nullptr, + winrt::guid_of(), + cmdList.put_void()); + + // Create Committed Resource + // 3 is number of channels we use. R G B without alpha. + UINT64 bufferbytesize = 3 * sizeof(float) * softwareBitmap.PixelWidth() * softwareBitmap.PixelHeight(); + D3D12_HEAP_PROPERTIES heapProperties = { + D3D12_HEAP_TYPE_DEFAULT, + D3D12_CPU_PAGE_PROPERTY_UNKNOWN, + D3D12_MEMORY_POOL_UNKNOWN, + 0, + 0}; + D3D12_RESOURCE_DESC resourceDesc = { + D3D12_RESOURCE_DIMENSION_BUFFER, + 0, + bufferbytesize, + 1, + 1, + 1, + DXGI_FORMAT_UNKNOWN, + {1, 0}, + D3D12_TEXTURE_LAYOUT_ROW_MAJOR, + D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS}; + + com_ptr pGPUResource = nullptr; + com_ptr imageUploadHeap; + pD3D12Device->CreateCommittedResource( + &heapProperties, + D3D12_HEAP_FLAG_NONE, + &resourceDesc, + D3D12_RESOURCE_STATE_COMMON, + nullptr, + __uuidof(ID3D12Resource), + pGPUResource.put_void()); + + // Create the GPU upload buffer. + CD3DX12_HEAP_PROPERTIES props(D3D12_HEAP_TYPE_UPLOAD); + auto buffer = CD3DX12_RESOURCE_DESC::Buffer(bufferbytesize); + WINML_EXPECT_NO_THROW(pD3D12Device->CreateCommittedResource( + &props, + D3D12_HEAP_FLAG_NONE, + &buffer, + D3D12_RESOURCE_STATE_GENERIC_READ, + nullptr, + __uuidof(ID3D12Resource), + imageUploadHeap.put_void())); + + // Copy from Cpu to GPU + D3D12_SUBRESOURCE_DATA CPUData = {}; + CPUData.pData = reinterpret_cast(pCPUTensor); + CPUData.RowPitch = bufferbytesize; + CPUData.SlicePitch = bufferbytesize; + UpdateSubresources(cmdList.get(), pGPUResource.get(), imageUploadHeap.get(), 0, 0, 1, &CPUData); + + // Close the command list and execute it to begin the initial GPU setup. 
+ WINML_EXPECT_NO_THROW(cmdList->Close()); + ID3D12CommandList* ppCommandLists[] = {cmdList.get()}; + dxQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists); + + // GPU tensorize + com_ptr<::IUnknown> spUnkTensor; + TensorFloat input1imagetensor(nullptr); + __int64 shapes[4] = {1, 3, softwareBitmap.PixelWidth(), softwareBitmap.PixelHeight()}; + tensorfactory->CreateFromD3D12Resource(pGPUResource.get(), shapes, 4, spUnkTensor.put()); + spUnkTensor.try_as(input1imagetensor); + + auto feature = model.InputFeatures().First(); + WINML_EXPECT_NO_THROW(modelBinding.Bind(feature.Current().Name(), input1imagetensor)); + + auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); + auto outputtensorshape = outputtensordescriptor.Shape(); + VideoFrame outputimage( + BitmapPixelFormat::Rgba8, + static_cast(outputtensorshape.GetAt(3)), + static_cast(outputtensorshape.GetAt(2))); + ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); + + WINML_EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); + + // Evaluate the model + winrt::hstring correlationId; + dmlSessionCustom.EvaluateAsync(modelBinding, correlationId).get(); + + // Verify the output by comparing with the benchmark image + SoftwareBitmap bm_softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(bmImagePath); + bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Rgba8); + VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); + ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); + EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); + + //check the output video frame object + StorageFolder currentfolder = StorageFolder::GetFolderFromPathAsync(modulePath).get(); + StorageFile outimagefile = currentfolder.CreateFileAsync(outputDataImageFileName, CreationCollisionOption::ReplaceExisting).get(); + 
IRandomAccessStream writestream = outimagefile.OpenAsync(FileAccessMode::ReadWrite).get(); + BitmapEncoder encoder = BitmapEncoder::CreateAsync(BitmapEncoder::JpegEncoderId(), writestream).get(); + // Set the software bitmap + encoder.SetSoftwareBitmap(outputimage.SoftwareBitmap()); + encoder.FlushAsync().get(); } -TEST_F(ScenarioCppWinrtGpuTest, DISABLED_SyncVsAsync) -{ - // create model, device and session - LearningModel model = nullptr; - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"fns-candy.onnx")); - - LearningModelSession session = nullptr; - EXPECT_NO_THROW(session = LearningModelSession(model, LearningModelDevice(LearningModelDeviceKind::DirectX))); - - // create the binding - LearningModelBinding modelBinding(session); - - // bind the input - std::wstring fullImagePath = FileHelpers::GetModulePath() + L"fish_720.png"; - StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); - IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); - SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); - VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); - - auto imagetensor = ImageFeatureValue::CreateFromVideoFrame(frame); - auto inputFeatureDescriptor = model.InputFeatures().First(); - EXPECT_NO_THROW(modelBinding.Bind(inputFeatureDescriptor.Current().Name(), imagetensor)); - - UINT N = 20; - - auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); - auto outputtensorshape = outputtensordescriptor.Shape(); - VideoFrame outputimage( - BitmapPixelFormat::Rgba8, - static_cast(outputtensorshape.GetAt(3)), - static_cast(outputtensorshape.GetAt(2))); - ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); - EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); - - // evaluate N times synchronously and time it - 
auto startSync = std::chrono::high_resolution_clock::now(); - for (UINT i = 0; i < N; i++) - { - session.Evaluate(modelBinding, L""); - } - auto syncTime = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - startSync); - std::cout << "Synchronous time for " << N << " evaluations: " << syncTime.count() << " milliseconds\n"; - - // evaluate N times Asynchronously and time it - std::vector> tasks; - std::vector bindings(N, nullptr); - - for (size_t i = 0; i < bindings.size(); i++) - { - bindings[i] = LearningModelBinding(session); - bindings[i].Bind(inputFeatureDescriptor.Current().Name(), imagetensor); - bindings[i].Bind( - model.OutputFeatures().First().Current().Name(), - VideoFrame(BitmapPixelFormat::Rgba8, - static_cast(outputtensorshape.GetAt(3)), - static_cast(outputtensorshape.GetAt(2)))); - } +static void ScenarioCppWinrtTest_QuantizedModels() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"onnxzoo_lotus_inception_v1-dq.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a session on the default device + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::Default)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input); + // set an actual buffer here. we're using uninitialized data for simplicity. 
+ binding.Bind(input.Name(), featureValue); + } + // run eval + WINML_EXPECT_NO_THROW(session.Evaluate(binding, filePath)); +} - auto startAsync = std::chrono::high_resolution_clock::now(); - for (UINT i = 0; i < N; i++) - { - tasks.emplace_back(session.EvaluateAsync(bindings[i], L"")); - } - // wait for them all to complete - for (auto&& task : tasks) - { - task.get(); - } - auto asyncTime = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - startAsync); - std::cout << "Asynchronous time for " << N << " evaluations: " << asyncTime.count() << " milliseconds\n"; +static void ScenarioCppWinrtGpuTest_MsftQuantizedModels() { + // load a model + std::wstring filePath = FileHelpers::GetModulePath() + L"coreml_Resnet50_ImageNet-dq.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + LearningModelSession session(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); + // create a binding set + LearningModelBinding binding(session); + // bind the input and the output buffers by name + + std::wstring fullImagePath = FileHelpers::GetModulePath() + L"kitten_224.png"; + StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); + IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); + SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); + + auto inputs = model.InputFeatures(); + for (auto&& input : inputs) { + auto featureValue = FeatureValueFromFeatureValueDescriptor(input, softwareBitmap); + // set an actual buffer here. we're using uninitialized data for simplicity. 
+ binding.Bind(input.Name(), featureValue); + } + // run eval + WINML_EXPECT_NO_THROW(session.Evaluate(binding, filePath)); } +static void ScenarioCppWinrtGpuTest_DISABLED_SyncVsAsync() { + // create model, device and session + LearningModel model = nullptr; + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"fns-candy.onnx")); + + LearningModelSession session = nullptr; + WINML_EXPECT_NO_THROW(session = LearningModelSession(model, LearningModelDevice(LearningModelDeviceKind::DirectX))); + + // create the binding + LearningModelBinding modelBinding(session); + + // bind the input + std::wstring fullImagePath = FileHelpers::GetModulePath() + L"fish_720.png"; + StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); + IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); + SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); + VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); + + auto imagetensor = ImageFeatureValue::CreateFromVideoFrame(frame); + auto inputFeatureDescriptor = model.InputFeatures().First(); + WINML_EXPECT_NO_THROW(modelBinding.Bind(inputFeatureDescriptor.Current().Name(), imagetensor)); + + UINT N = 20; + + auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); + auto outputtensorshape = outputtensordescriptor.Shape(); + VideoFrame outputimage( + BitmapPixelFormat::Rgba8, + static_cast(outputtensorshape.GetAt(3)), + static_cast(outputtensorshape.GetAt(2))); + ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); + WINML_EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); + + // evaluate N times synchronously and time it + auto startSync = std::chrono::high_resolution_clock::now(); + for (UINT i = 0; i < N; i++) { + session.Evaluate(modelBinding, L""); + } + auto syncTime = 
std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - startSync); + std::cout << "Synchronous time for " << N << " evaluations: " << syncTime.count() << " milliseconds\n"; + + // evaluate N times Asynchronously and time it + std::vector> tasks; + std::vector bindings(N, nullptr); + + for (size_t i = 0; i < bindings.size(); i++) { + bindings[i] = LearningModelBinding(session); + bindings[i].Bind(inputFeatureDescriptor.Current().Name(), imagetensor); + bindings[i].Bind( + model.OutputFeatures().First().Current().Name(), + VideoFrame(BitmapPixelFormat::Rgba8, + static_cast(outputtensorshape.GetAt(3)), + static_cast(outputtensorshape.GetAt(2)))); + } + + auto startAsync = std::chrono::high_resolution_clock::now(); + for (UINT i = 0; i < N; i++) { + tasks.emplace_back(session.EvaluateAsync(bindings[i], L"")); + } + // wait for them all to complete + for (auto&& task : tasks) { + task.get(); + } + auto asyncTime = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - startAsync); + std::cout << "Asynchronous time for " << N << " evaluations: " << asyncTime.count() << " milliseconds\n"; +} -TEST_F(ScenarioCppWinrtGpuTest, DISABLED_CustomCommandQueueWithFence) -{ - static const wchar_t* const modelFileName = L"fns-candy.onnx"; - static const wchar_t* const inputDataImageFileName = L"fish_720.png"; +static void ScenarioCppWinrtGpuTest_DISABLED_CustomCommandQueueWithFence() { + static const wchar_t* const modelFileName = L"fns-candy.onnx"; + static const wchar_t* const inputDataImageFileName = L"fish_720.png"; - com_ptr d3d12Device; - EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), d3d12Device.put_void())); + com_ptr d3d12Device; + EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), d3d12Device.put_void())); - D3D12_COMMAND_QUEUE_DESC queueDesc = {}; - queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; + D3D12_COMMAND_QUEUE_DESC queueDesc = {}; 
+ queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; - com_ptr queue; - EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateCommandQueue(&queueDesc, __uuidof(ID3D12CommandQueue), queue.put_void())); + com_ptr queue; + EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateCommandQueue(&queueDesc, __uuidof(ID3D12CommandQueue), queue.put_void())); - com_ptr fence; - EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, __uuidof(ID3D12Fence), fence.put_void())); + com_ptr fence; + EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, __uuidof(ID3D12Fence), fence.put_void())); - auto devicefactory = get_activation_factory(); + auto devicefactory = get_activation_factory(); - com_ptr<::IUnknown> learningModelDeviceUnknown; - EXPECT_HRESULT_SUCCEEDED(devicefactory->CreateFromD3D12CommandQueue(queue.get(), learningModelDeviceUnknown.put())); + com_ptr<::IUnknown> learningModelDeviceUnknown; + EXPECT_HRESULT_SUCCEEDED(devicefactory->CreateFromD3D12CommandQueue(queue.get(), learningModelDeviceUnknown.put())); - LearningModelDevice device = nullptr; - EXPECT_NO_THROW(learningModelDeviceUnknown.as(device)); + LearningModelDevice device = nullptr; + WINML_EXPECT_NO_THROW(learningModelDeviceUnknown.as(device)); - std::wstring modulePath = FileHelpers::GetModulePath(); + std::wstring modulePath = FileHelpers::GetModulePath(); - // WinML model creation - std::wstring fullModelPath = modulePath + modelFileName; - LearningModel model(nullptr); - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); - LearningModelSession modelSession = nullptr; - EXPECT_NO_THROW(modelSession = LearningModelSession(model, device)); - LearningModelBinding modelBinding = nullptr; - EXPECT_NO_THROW(modelBinding = LearningModelBinding(modelSession)); + // WinML model creation + std::wstring fullModelPath = modulePath + modelFileName; + LearningModel model(nullptr); + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); + 
LearningModelSession modelSession = nullptr; + WINML_EXPECT_NO_THROW(modelSession = LearningModelSession(model, device)); + LearningModelBinding modelBinding = nullptr; + WINML_EXPECT_NO_THROW(modelBinding = LearningModelBinding(modelSession)); - std::wstring fullImagePath = modulePath + inputDataImageFileName; + std::wstring fullImagePath = modulePath + inputDataImageFileName; - StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); - IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); - SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); - VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); - ImageFeatureValue input1imagetensor = ImageFeatureValue::CreateFromVideoFrame(frame); + StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); + IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); + SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); + VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); + ImageFeatureValue input1imagetensor = ImageFeatureValue::CreateFromVideoFrame(frame); - auto feature = model.InputFeatures().First(); - EXPECT_NO_THROW(modelBinding.Bind(feature.Current().Name(), input1imagetensor)); + auto feature = model.InputFeatures().First(); + WINML_EXPECT_NO_THROW(modelBinding.Bind(feature.Current().Name(), input1imagetensor)); - auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); - auto outputtensorshape = outputtensordescriptor.Shape(); - VideoFrame outputimage( - BitmapPixelFormat::Rgba8, - static_cast(outputtensorshape.GetAt(3)), - static_cast(outputtensorshape.GetAt(2))); - ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); + auto outputtensordescriptor = model.OutputFeatures().First().Current().as(); + auto outputtensorshape = 
outputtensordescriptor.Shape(); + VideoFrame outputimage( + BitmapPixelFormat::Rgba8, + static_cast(outputtensorshape.GetAt(3)), + static_cast(outputtensorshape.GetAt(2))); + ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); - EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); + WINML_EXPECT_NO_THROW(modelBinding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); - // Block the queue on the fence, evaluate the model, then queue a signal. The model evaluation should not complete - // until after the wait is unblocked, and the signal should not complete until model evaluation does. This can - // only be true if WinML executes the workload on the supplied queue (instead of using its own). + // Block the queue on the fence, evaluate the model, then queue a signal. The model evaluation should not complete + // until after the wait is unblocked, and the signal should not complete until model evaluation does. This can + // only be true if WinML executes the workload on the supplied queue (instead of using its own). 
- EXPECT_HRESULT_SUCCEEDED(queue->Wait(fence.get(), 1)); + EXPECT_HRESULT_SUCCEEDED(queue->Wait(fence.get(), 1)); - EXPECT_HRESULT_SUCCEEDED(queue->Signal(fence.get(), 2)); + EXPECT_HRESULT_SUCCEEDED(queue->Signal(fence.get(), 2)); - winrt::hstring correlationId; - winrt::Windows::Foundation::IAsyncOperation asyncOp; - EXPECT_NO_THROW(asyncOp = modelSession.EvaluateAsync(modelBinding, correlationId)); + winrt::hstring correlationId; + winrt::Windows::Foundation::IAsyncOperation asyncOp; + WINML_EXPECT_NO_THROW(asyncOp = modelSession.EvaluateAsync(modelBinding, correlationId)); - Sleep(1000); // Give the model a chance to run (which it shouldn't if everything is working correctly) + Sleep(1000); // Give the model a chance to run (which it shouldn't if everything is working correctly) - // Because we haven't unblocked the wait yet, model evaluation must not have completed (nor the fence signal) - EXPECT_NE(asyncOp.Status(), winrt::Windows::Foundation::AsyncStatus::Completed); - EXPECT_EQ(fence->GetCompletedValue(), 0); + // Because we haven't unblocked the wait yet, model evaluation must not have completed (nor the fence signal) + EXPECT_NE(asyncOp.Status(), winrt::Windows::Foundation::AsyncStatus::Completed); + EXPECT_EQ(fence->GetCompletedValue(), 0); - // Unblock the queue - EXPECT_HRESULT_SUCCEEDED(fence->Signal(1)); + // Unblock the queue + EXPECT_HRESULT_SUCCEEDED(fence->Signal(1)); - // Wait for model evaluation to complete - asyncOp.get(); + // Wait for model evaluation to complete + asyncOp.get(); - // The fence must be signaled by now (because model evaluation has completed) - EXPECT_EQ(fence->GetCompletedValue(), 2); + // The fence must be signaled by now (because model evaluation has completed) + EXPECT_EQ(fence->GetCompletedValue(), 2); } -TEST_F(ScenarioCppWinrtGpuTest, DISABLED_ReuseVideoFrame) -{ - std::wstring modulePath = FileHelpers::GetModulePath(); - std::wstring inputImagePath = modulePath + L"fish_720.png"; - std::wstring bmImagePath = 
modulePath + L"bm_fish_720.jpg"; - std::wstring modelPath = modulePath + L"fns-candy.onnx"; - - std::vector deviceKinds = { LearningModelDeviceKind::Cpu, LearningModelDeviceKind::DirectX }; - std::vector videoFrameSources; - DeviceHelpers::AdapterEnumerationSupport support; - DeviceHelpers::GetAdapterEnumerationSupport(&support); - if (support.has_dxgi) - { - videoFrameSources = { "SoftwareBitmap", "Direct3DSurface" }; - } - else { - videoFrameSources = { "SoftwareBitmap" }; - - } +static void ScenarioCppWinrtGpuTest_DISABLED_ReuseVideoFrame() { + std::wstring modulePath = FileHelpers::GetModulePath(); + std::wstring inputImagePath = modulePath + L"fish_720.png"; + std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; + std::wstring modelPath = modulePath + L"fns-candy.onnx"; + + std::vector deviceKinds = {LearningModelDeviceKind::Cpu, LearningModelDeviceKind::DirectX}; + std::vector videoFrameSources; + DeviceHelpers::AdapterEnumerationSupport support; + DeviceHelpers::GetAdapterEnumerationSupport(&support); + if (support.has_dxgi) { + videoFrameSources = {"SoftwareBitmap", "Direct3DSurface"}; + } else { + videoFrameSources = {"SoftwareBitmap"}; + } - for (auto deviceKind : deviceKinds) - { - auto device = LearningModelDevice(deviceKind); - auto model = LearningModel::LoadFromFilePath(modelPath); - auto session = LearningModelSession(model, device); - auto binding = LearningModelBinding(session); - for (auto videoFrameSource : videoFrameSources) - { - VideoFrame reuseVideoFrame = nullptr; - if (videoFrameSource == "SoftwareBitmap") - { - reuseVideoFrame = VideoFrame::CreateWithSoftwareBitmap(SoftwareBitmap(BitmapPixelFormat::Bgra8, 720, 720)); - } - else - { - reuseVideoFrame = VideoFrame::CreateAsDirect3D11SurfaceBacked(DirectXPixelFormat::B8G8R8X8UIntNormalized, 720, 720); - } - for (uint32_t i = 0; i < 3; ++i) - { - SoftwareBitmap softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(inputImagePath); - VideoFrame videoFrame = 
VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); - // reuse video frame - videoFrame.CopyToAsync(reuseVideoFrame).get(); - - // bind input - binding.Bind(model.InputFeatures().First().Current().Name(), reuseVideoFrame); - - // bind output - VideoFrame outputimage(BitmapPixelFormat::Bgra8, 720, 720); - ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); - EXPECT_NO_THROW(binding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); - - // evaluate - winrt::hstring correlationId; - EXPECT_NO_THROW(session.EvaluateAsync(binding, correlationId).get()); - - // verify result - SoftwareBitmap bm_softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(bmImagePath); - bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Bgra8); - VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); - ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); - EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); - } - } + for (auto deviceKind : deviceKinds) { + auto device = LearningModelDevice(deviceKind); + auto model = LearningModel::LoadFromFilePath(modelPath); + auto session = LearningModelSession(model, device); + auto binding = LearningModelBinding(session); + for (auto videoFrameSource : videoFrameSources) { + VideoFrame reuseVideoFrame = nullptr; + if (videoFrameSource == "SoftwareBitmap") { + reuseVideoFrame = VideoFrame::CreateWithSoftwareBitmap(SoftwareBitmap(BitmapPixelFormat::Bgra8, 720, 720)); + } else { + reuseVideoFrame = VideoFrame::CreateAsDirect3D11SurfaceBacked(DirectXPixelFormat::B8G8R8X8UIntNormalized, 720, 720); + } + for (uint32_t i = 0; i < 3; ++i) { + SoftwareBitmap softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(inputImagePath); + VideoFrame videoFrame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); + // reuse video frame + videoFrame.CopyToAsync(reuseVideoFrame).get(); + + // bind input + 
binding.Bind(model.InputFeatures().First().Current().Name(), reuseVideoFrame); + + // bind output + VideoFrame outputimage(BitmapPixelFormat::Bgra8, 720, 720); + ImageFeatureValue outputTensor = ImageFeatureValue::CreateFromVideoFrame(outputimage); + WINML_EXPECT_NO_THROW(binding.Bind(model.OutputFeatures().First().Current().Name(), outputTensor)); + + // evaluate + winrt::hstring correlationId; + WINML_EXPECT_NO_THROW(session.EvaluateAsync(binding, correlationId).get()); + + // verify result + SoftwareBitmap bm_softwareBitmap = FileHelpers::GetSoftwareBitmapFromFile(bmImagePath); + bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Bgra8); + VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); + ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); + EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); + } } + } } - -TEST_F(ScenarioCppWinrtTest, EncryptedStream) -{ - // get a stream - std::wstring path = FileHelpers::GetModulePath() + L"model.onnx"; - auto storageFile = StorageFile::GetFileFromPathAsync(path).get(); - auto fileBuffer = winrt::Windows::Storage::FileIO::ReadBufferAsync(storageFile).get(); - - // encrypt - auto algorithmName = winrt::Windows::Security::Cryptography::Core::SymmetricAlgorithmNames::AesCbcPkcs7(); - auto algorithm = winrt::Windows::Security::Cryptography::Core::SymmetricKeyAlgorithmProvider::OpenAlgorithm(algorithmName); - uint32_t keyLength = 32; - auto keyBuffer = winrt::Windows::Security::Cryptography::CryptographicBuffer::GenerateRandom(keyLength); - auto key = algorithm.CreateSymmetricKey(keyBuffer); - auto iv = winrt::Windows::Security::Cryptography::CryptographicBuffer::GenerateRandom(algorithm.BlockLength()); - auto encryptedBuffer = winrt::Windows::Security::Cryptography::Core::CryptographicEngine::Encrypt(key, fileBuffer, iv); - - // verify loading the encrypted stream fails appropriately. 
- auto encryptedStream = InMemoryRandomAccessStream(); - encryptedStream.WriteAsync(encryptedBuffer).get(); - EXPECT_THROW_SPECIFIC(LearningModel::LoadFromStream(RandomAccessStreamReference::CreateFromStream(encryptedStream)), - winrt::hresult_error, - [](const winrt::hresult_error& e) -> bool - { - return e.code() == E_INVALIDARG; - }); - - // now decrypt - auto decryptedBuffer = winrt::Windows::Security::Cryptography::Core::CryptographicEngine::Decrypt(key, encryptedBuffer, iv); - auto decryptedStream = InMemoryRandomAccessStream(); - decryptedStream.WriteAsync(decryptedBuffer).get(); - - // load! - LearningModel model = nullptr; - EXPECT_NO_THROW(model = LearningModel::LoadFromStream(RandomAccessStreamReference::CreateFromStream(decryptedStream))); - LearningModelSession session = nullptr; - EXPECT_NO_THROW(session = LearningModelSession(model)); +static void ScenarioCppWinrtTest_EncryptedStream() { + // get a stream + std::wstring path = FileHelpers::GetModulePath() + L"model.onnx"; + auto storageFile = StorageFile::GetFileFromPathAsync(path).get(); + auto fileBuffer = winrt::Windows::Storage::FileIO::ReadBufferAsync(storageFile).get(); + + // encrypt + auto algorithmName = winrt::Windows::Security::Cryptography::Core::SymmetricAlgorithmNames::AesCbcPkcs7(); + auto algorithm = winrt::Windows::Security::Cryptography::Core::SymmetricKeyAlgorithmProvider::OpenAlgorithm(algorithmName); + uint32_t keyLength = 32; + auto keyBuffer = winrt::Windows::Security::Cryptography::CryptographicBuffer::GenerateRandom(keyLength); + auto key = algorithm.CreateSymmetricKey(keyBuffer); + auto iv = winrt::Windows::Security::Cryptography::CryptographicBuffer::GenerateRandom(algorithm.BlockLength()); + auto encryptedBuffer = winrt::Windows::Security::Cryptography::Core::CryptographicEngine::Encrypt(key, fileBuffer, iv); + + // verify loading the encrypted stream fails appropriately. 
+ auto encryptedStream = InMemoryRandomAccessStream(); + encryptedStream.WriteAsync(encryptedBuffer).get(); + EXPECT_THROW_SPECIFIC(LearningModel::LoadFromStream(RandomAccessStreamReference::CreateFromStream(encryptedStream)), + winrt::hresult_error, + [](const winrt::hresult_error& e) -> bool { + return e.code() == E_INVALIDARG; + }); + + // now decrypt + auto decryptedBuffer = winrt::Windows::Security::Cryptography::Core::CryptographicEngine::Decrypt(key, encryptedBuffer, iv); + auto decryptedStream = InMemoryRandomAccessStream(); + decryptedStream.WriteAsync(decryptedBuffer).get(); + + // load! + LearningModel model = nullptr; + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromStream(RandomAccessStreamReference::CreateFromStream(decryptedStream))); + LearningModelSession session = nullptr; + WINML_EXPECT_NO_THROW(session = LearningModelSession(model)); } -void DeviceLostRecoveryHelper() -{ +void DeviceLostRecoveryHelper() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -1469,13 +1347,10 @@ void DeviceLostRecoveryHelper() } // evaluate should fail - try - { + try { session.Evaluate(binding, L""); FAIL() << "Evaluate should fail after removing the device"; - } - catch (...) - { + } catch (...) { } // remove all references to the device by reseting the session and binding. 
@@ -1490,60 +1365,55 @@ void DeviceLostRecoveryHelper() exit(0); } -TEST_F(ScenarioCppWinrtGpuTestDeathTest, DeviceLostRecovery) { - - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - EXPECT_EXIT( - DeviceLostRecoveryHelper(), - ::testing::ExitedWithCode(0), - "" - ); +static void ScenarioCppWinrtGpuTestDeathTest_DeviceLostRecovery() { + ::testing::FLAGS_gtest_death_test_style = "threadsafe"; + EXPECT_EXIT( + DeviceLostRecoveryHelper(), + ::testing::ExitedWithCode(0), + ""); } -TEST_F(ScenarioCppWinrtGpuSkipEdgeCoreTest, D2DInterop) -{ - // load a model (model.onnx == squeezenet[1,3,224,224]) - std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; - LearningModel model = LearningModel::LoadFromFilePath(filePath); - // create a dx12 device - com_ptr device = nullptr; - EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(NULL, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device1), device.put_void())); - // now create a command queue from it - com_ptr commandQueue = nullptr; - D3D12_COMMAND_QUEUE_DESC queueDesc = {}; - queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; - EXPECT_HRESULT_SUCCEEDED(device->CreateCommandQueue(&queueDesc, winrt::guid_of(), commandQueue.put_void())); - // create a winml learning device based on that dx12 queue - auto factory = get_activation_factory(); - com_ptr<::IUnknown> spUnk; - EXPECT_HRESULT_SUCCEEDED(factory->CreateFromD3D12CommandQueue(commandQueue.get(), spUnk.put())); - auto learningDevice = spUnk.as(); - // create a winml session from that dx device - LearningModelSession session(model, learningDevice); - // now lets try and do some XAML/d2d on that same device, first prealloc a VideoFrame - VideoFrame frame = VideoFrame::CreateAsDirect3D11SurfaceBacked( - DirectXPixelFormat::B8G8R8A8UIntNormalized, - 224, - 224, - session.Device().Direct3D11Device() - ); - // create a D2D factory - D2D1_FACTORY_OPTIONS options = {}; - com_ptr d2dFactory; - EXPECT_HRESULT_SUCCEEDED(D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, 
__uuidof(ID2D1Factory), &options, d2dFactory.put_void())); - // grab the dxgi surface back from our video frame - com_ptr dxgiSurface; - com_ptr dxgiInterfaceAccess = frame.Direct3DSurface().as(); - EXPECT_HRESULT_SUCCEEDED(dxgiInterfaceAccess->GetInterface(__uuidof(IDXGISurface), dxgiSurface.put_void())); - // and try and use our surface to create a render targer - com_ptr renderTarget; - D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(); - props.pixelFormat = D2D1::PixelFormat( - DXGI_FORMAT_B8G8R8A8_UNORM, - D2D1_ALPHA_MODE_IGNORE - ); - EXPECT_HRESULT_SUCCEEDED(d2dFactory->CreateDxgiSurfaceRenderTarget( - dxgiSurface.get(), - props, - renderTarget.put())); +static void ScenarioCppWinrtGpuSkipEdgeCoreTest_D2DInterop() { + // load a model (model.onnx == squeezenet[1,3,224,224]) + std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; + LearningModel model = LearningModel::LoadFromFilePath(filePath); + // create a dx12 device + com_ptr device = nullptr; + EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(NULL, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device1), device.put_void())); + // now create a command queue from it + com_ptr commandQueue = nullptr; + D3D12_COMMAND_QUEUE_DESC queueDesc = {}; + queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; + EXPECT_HRESULT_SUCCEEDED(device->CreateCommandQueue(&queueDesc, winrt::guid_of(), commandQueue.put_void())); + // create a winml learning device based on that dx12 queue + auto factory = get_activation_factory(); + com_ptr<::IUnknown> spUnk; + EXPECT_HRESULT_SUCCEEDED(factory->CreateFromD3D12CommandQueue(commandQueue.get(), spUnk.put())); + auto learningDevice = spUnk.as(); + // create a winml session from that dx device + LearningModelSession session(model, learningDevice); + // now lets try and do some XAML/d2d on that same device, first prealloc a VideoFrame + VideoFrame frame = VideoFrame::CreateAsDirect3D11SurfaceBacked( + DirectXPixelFormat::B8G8R8A8UIntNormalized, + 224, + 224, + 
session.Device().Direct3D11Device()); + // create a D2D factory + D2D1_FACTORY_OPTIONS options = {}; + com_ptr d2dFactory; + EXPECT_HRESULT_SUCCEEDED(D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, __uuidof(ID2D1Factory), &options, d2dFactory.put_void())); + // grab the dxgi surface back from our video frame + com_ptr dxgiSurface; + com_ptr dxgiInterfaceAccess = frame.Direct3DSurface().as(); + EXPECT_HRESULT_SUCCEEDED(dxgiInterfaceAccess->GetInterface(__uuidof(IDXGISurface), dxgiSurface.put_void())); + // and try and use our surface to create a render targer + com_ptr renderTarget; + D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(); + props.pixelFormat = D2D1::PixelFormat( + DXGI_FORMAT_B8G8R8A8_UNORM, + D2D1_ALPHA_MODE_IGNORE); + EXPECT_HRESULT_SUCCEEDED(d2dFactory->CreateDxgiSurfaceRenderTarget( + dxgiSurface.get(), + props, + renderTarget.put())); } diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h new file mode 100644 index 0000000000000..3880848f68f39 --- /dev/null +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h @@ -0,0 +1,47 @@ +#include "test.h" + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtTest, ScenarioCppWinrtTestSetup) +WINML_TEST(ScenarioCppWinrtTest, Sample1, ScenarioCppWinrtTest_Sample1) +WINML_TEST(ScenarioCppWinrtTest, Scenario1LoadBindEvalDefault, ScenarioCppWinrtTest_Scenario1LoadBindEvalDefault) +WINML_TEST(ScenarioCppWinrtTest, Scenario2LoadModelFromStream, ScenarioCppWinrtTest_Scenario2LoadModelFromStream) +WINML_TEST(ScenarioCppWinrtTest, Scenario5AsyncEval, ScenarioCppWinrtTest_Scenario5AsyncEval) +WINML_TEST(ScenarioCppWinrtTest, Scenario7EvalWithNoBind, ScenarioCppWinrtTest_Scenario7EvalWithNoBind) +WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleDefault, ScenarioCppWinrtTest_Scenario8SetDeviceSampleDefault) +WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleCPU, ScenarioCppWinrtTest_Scenario8SetDeviceSampleCPU) 
+WINML_TEST(ScenarioCppWinrtTest, Scenario17DevDiagnostics, ScenarioCppWinrtTest_Scenario17DevDiagnostics) +WINML_TEST(ScenarioCppWinrtTest, DISABLED_Scenario22ImageBindingAsCPUTensor, ScenarioCppWinrtTest_DISABLED_Scenario22ImageBindingAsCPUTensor) +WINML_TEST(ScenarioCppWinrtTest, QuantizedModels, ScenarioCppWinrtTest_QuantizedModels) +WINML_TEST(ScenarioCppWinrtTest, EncryptedStream, ScenarioCppWinrtTest_EncryptedStream) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuTest, ScenarioCppWinrtGpuTestSetup) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario3SoftwareBitmapInputBinding, ScenarioCppWinrtGpuTest_Scenario3SoftwareBitmapInputBinding) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario6BindWithProperties, ScenarioCppWinrtGpuTest_Scenario6BindWithProperties) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleDefaultDirectX, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleDefaultDirectX) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMinPower, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMinPower) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMaxPerf, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMaxPerf) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMyCameraDevice, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMyCameraDevice) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleCustomCommandQueue, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario9LoadBindEvalInputTensorGPU, ScenarioCppWinrtGpuTest_DISABLED_Scenario9LoadBindEvalInputTensorGPU) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario13SingleModelOnCPUandGPU, ScenarioCppWinrtGpuTest_Scenario13SingleModelOnCPUandGPU) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsTensor, ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsTensor) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsImage, 
ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsImage) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario14RunModelSwapchain, ScenarioCppWinrtGpuTest_Scenario14RunModelSwapchain) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20aLoadBindEvalCustomOperatorCPU, ScenarioCppWinrtGpuTest_Scenario20aLoadBindEvalCustomOperatorCPU) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20bLoadBindEvalReplacementCustomOperatorCPU, ScenarioCppWinrtGpuTest_Scenario20bLoadBindEvalReplacementCustomOperatorCPU) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario21RunModel2ChainZ, ScenarioCppWinrtGpuTest_DISABLED_Scenario21RunModel2ChainZ) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario22ImageBindingAsGPUTensor, ScenarioCppWinrtGpuTest_DISABLED_Scenario22ImageBindingAsGPUTensor) +WINML_TEST(ScenarioCppWinrtGpuTest, MsftQuantizedModels, ScenarioCppWinrtGpuTest_MsftQuantizedModels) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_SyncVsAsync, ScenarioCppWinrtGpuTest_DISABLED_SyncVsAsync) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_CustomCommandQueueWithFence, ScenarioCppWinrtGpuTest_DISABLED_CustomCommandQueueWithFence) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_ReuseVideoFrame, ScenarioCppWinrtGpuTest_DISABLED_ReuseVideoFrame) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuTestDeathTest, ScenarioCppWinrtGpuTestSetup) +WINML_TEST(ScenarioCppWinrtGpuTestDeathTest, DeviceLostRecovery, ScenarioCppWinrtGpuTestDeathTest_DeviceLostRecovery) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuSkipEdgeCoreTest, ScenarioCppWinrtGpuSkipEdgeCoreTestSetup) +WINML_TEST(ScenarioCppWinrtGpuSkipEdgeCoreTest, Scenario8SetDeviceSampleD3D11Device, ScenarioCppWinrtGpuSkipEdgeCoreTest_Scenario8SetDeviceSampleD3D11Device) +WINML_TEST(ScenarioCppWinrtGpuSkipEdgeCoreTest, D2DInterop, ScenarioCppWinrtGpuSkipEdgeCoreTest_D2DInterop) +WINML_TEST_CLASS_END() From ab2b4cd7a7472d169a0e99c5e5981313de108cec Mon Sep 17 00:00:00 2001 From: Ryan Lai Date: Tue, 
7 Jan 2020 14:05:58 -0800 Subject: [PATCH 2/6] change test methods to disabled --- .../scenario/cppwinrt/scenariotestscppwinrt.cpp | 15 +++++++-------- .../scenario/cppwinrt/scenariotestscppwinrt.h | 14 +++++++------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp index 87dde2d21caf7..8b82934f3ffc4 100644 --- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp @@ -1,7 +1,6 @@ #include "testPch.h" #include -#include #include "winrt/Windows.Devices.Enumeration.Pnp.h" #include "winrt/Windows.Graphics.DirectX.Direct3D11.h" @@ -450,7 +449,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue() } //pass a Tensor in as an input GPU -static void ScenarioCppWinrtGpuTest_DISABLED_Scenario9LoadBindEvalInputTensorGPU() { +static void ScenarioCppWinrtGpuTest_Scenario9LoadBindEvalInputTensorGPU() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -716,7 +715,7 @@ static void ScenarioCppWinrtGpuTest_Scenario20bLoadBindEvalReplacementCustomOper } //! 
Scenario21: Load two models, set them up to run chained after one another on the same gpu hardware device -static void ScenarioCppWinrtGpuTest_DISABLED_Scenario21RunModel2ChainZ() { +static void ScenarioCppWinrtGpuTest_Scenario21RunModel2ChainZ() { // load a model, TODO: get a model that has an image descriptor std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -795,7 +794,7 @@ bool VerifyHelper(ImageFeatureValue actual, ImageFeatureValue expected) { return ((float)errors / size < cMaxErrorRate); } -static void ScenarioCppWinrtTest_DISABLED_Scenario22ImageBindingAsCPUTensor() { +static void ScenarioCppWinrtTest_Scenario22ImageBindingAsCPUTensor() { std::wstring modulePath = FileHelpers::GetModulePath(); std::wstring inputImagePath = modulePath + L"fish_720.png"; std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; @@ -868,7 +867,7 @@ static void ScenarioCppWinrtTest_DISABLED_Scenario22ImageBindingAsCPUTensor() { encoder.FlushAsync().get(); } -static void ScenarioCppWinrtGpuTest_DISABLED_Scenario22ImageBindingAsGPUTensor() { +static void ScenarioCppWinrtGpuTest_Scenario22ImageBindingAsGPUTensor() { std::wstring modulePath = FileHelpers::GetModulePath(); std::wstring inputImagePath = modulePath + L"fish_720.png"; std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; @@ -1085,7 +1084,7 @@ static void ScenarioCppWinrtGpuTest_MsftQuantizedModels() { WINML_EXPECT_NO_THROW(session.Evaluate(binding, filePath)); } -static void ScenarioCppWinrtGpuTest_DISABLED_SyncVsAsync() { +static void ScenarioCppWinrtGpuTest_SyncVsAsync() { // create model, device and session LearningModel model = nullptr; WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"fns-candy.onnx")); @@ -1152,7 +1151,7 @@ static void ScenarioCppWinrtGpuTest_DISABLED_SyncVsAsync() { std::cout << "Asynchronous time for " << N << " evaluations: " << asyncTime.count() << 
" milliseconds\n"; } -static void ScenarioCppWinrtGpuTest_DISABLED_CustomCommandQueueWithFence() { +static void ScenarioCppWinrtGpuTest_CustomCommandQueueWithFence() { static const wchar_t* const modelFileName = L"fns-candy.onnx"; static const wchar_t* const inputDataImageFileName = L"fish_720.png"; @@ -1236,7 +1235,7 @@ static void ScenarioCppWinrtGpuTest_DISABLED_CustomCommandQueueWithFence() { EXPECT_EQ(fence->GetCompletedValue(), 2); } -static void ScenarioCppWinrtGpuTest_DISABLED_ReuseVideoFrame() { +static void ScenarioCppWinrtGpuTest_ReuseVideoFrame() { std::wstring modulePath = FileHelpers::GetModulePath(); std::wstring inputImagePath = modulePath + L"fish_720.png"; std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h index 3880848f68f39..87962552fc5f3 100644 --- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h @@ -9,7 +9,7 @@ WINML_TEST(ScenarioCppWinrtTest, Scenario7EvalWithNoBind, ScenarioCppWinrtTest_S WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleDefault, ScenarioCppWinrtTest_Scenario8SetDeviceSampleDefault) WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleCPU, ScenarioCppWinrtTest_Scenario8SetDeviceSampleCPU) WINML_TEST(ScenarioCppWinrtTest, Scenario17DevDiagnostics, ScenarioCppWinrtTest_Scenario17DevDiagnostics) -WINML_TEST(ScenarioCppWinrtTest, DISABLED_Scenario22ImageBindingAsCPUTensor, ScenarioCppWinrtTest_DISABLED_Scenario22ImageBindingAsCPUTensor) +WINML_TEST(ScenarioCppWinrtTest, DISABLED_Scenario22ImageBindingAsCPUTensor, ScenarioCppWinrtTest_Scenario22ImageBindingAsCPUTensor) WINML_TEST(ScenarioCppWinrtTest, QuantizedModels, ScenarioCppWinrtTest_QuantizedModels) WINML_TEST(ScenarioCppWinrtTest, EncryptedStream, ScenarioCppWinrtTest_EncryptedStream) WINML_TEST_CLASS_END() @@ -22,19 +22,19 @@ WINML_TEST(ScenarioCppWinrtGpuTest, 
Scenario8SetDeviceSampleMinPower, ScenarioCp WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMaxPerf, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMaxPerf) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMyCameraDevice, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMyCameraDevice) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleCustomCommandQueue, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario9LoadBindEvalInputTensorGPU, ScenarioCppWinrtGpuTest_DISABLED_Scenario9LoadBindEvalInputTensorGPU) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario9LoadBindEvalInputTensorGPU, ScenarioCppWinrtGpuTest_Scenario9LoadBindEvalInputTensorGPU) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario13SingleModelOnCPUandGPU, ScenarioCppWinrtGpuTest_Scenario13SingleModelOnCPUandGPU) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsTensor, ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsTensor) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsImage, ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsImage) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario14RunModelSwapchain, ScenarioCppWinrtGpuTest_Scenario14RunModelSwapchain) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20aLoadBindEvalCustomOperatorCPU, ScenarioCppWinrtGpuTest_Scenario20aLoadBindEvalCustomOperatorCPU) WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20bLoadBindEvalReplacementCustomOperatorCPU, ScenarioCppWinrtGpuTest_Scenario20bLoadBindEvalReplacementCustomOperatorCPU) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario21RunModel2ChainZ, ScenarioCppWinrtGpuTest_DISABLED_Scenario21RunModel2ChainZ) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario22ImageBindingAsGPUTensor, ScenarioCppWinrtGpuTest_DISABLED_Scenario22ImageBindingAsGPUTensor) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario21RunModel2ChainZ, ScenarioCppWinrtGpuTest_Scenario21RunModel2ChainZ) 
+WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario22ImageBindingAsGPUTensor, ScenarioCppWinrtGpuTest_Scenario22ImageBindingAsGPUTensor) WINML_TEST(ScenarioCppWinrtGpuTest, MsftQuantizedModels, ScenarioCppWinrtGpuTest_MsftQuantizedModels) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_SyncVsAsync, ScenarioCppWinrtGpuTest_DISABLED_SyncVsAsync) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_CustomCommandQueueWithFence, ScenarioCppWinrtGpuTest_DISABLED_CustomCommandQueueWithFence) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_ReuseVideoFrame, ScenarioCppWinrtGpuTest_DISABLED_ReuseVideoFrame) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_SyncVsAsync, ScenarioCppWinrtGpuTest_SyncVsAsync) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_CustomCommandQueueWithFence, ScenarioCppWinrtGpuTest_CustomCommandQueueWithFence) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_ReuseVideoFrame, ScenarioCppWinrtGpuTest_ReuseVideoFrame) WINML_TEST_CLASS_END() WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuTestDeathTest, ScenarioCppWinrtGpuTestSetup) From 457e6d8268d113592443f715b9e1dd85ea093c83 Mon Sep 17 00:00:00 2001 From: Ryan Lai Date: Thu, 9 Jan 2020 13:58:38 -0800 Subject: [PATCH 3/6] Add custom winml macros for both taef and google tests --- cmake/winml_unittests.cmake | 2 + winml/test/common/SqueezeNetValidator.cpp | 48 ++--- winml/test/common/SqueezeNetValidator.h | 2 +- winml/test/common/googleTestMacros.h | 64 ++++-- winml/test/common/protobufHelpers.cpp | 39 ++-- winml/test/common/runtimeParameters.h | 2 +- winml/test/common/std.h | 34 +-- winml/test/common/taefTestMacros.h | 59 ++++++ winml/test/common/test.h | 5 +- winml/test/common/testPch.h | 2 + winml/test/scenario/cppwinrt/CustomNullOp.h | 8 +- .../cppwinrt/CustomOperatorProvider.h | 2 +- winml/test/scenario/cppwinrt/CustomOps.cpp | 127 +++++------ winml/test/scenario/cppwinrt/CustomOps.h | 21 ++ .../cppwinrt/scenariotestscppwinrt.cpp | 200 ++++++++++-------- .../scenario/cppwinrt/scenariotestscppwinrt.h | 116 
++++++---- 16 files changed, 453 insertions(+), 278 deletions(-) create mode 100644 winml/test/scenario/cppwinrt/CustomOps.h diff --git a/cmake/winml_unittests.cmake b/cmake/winml_unittests.cmake index 18e6f7cdf85ff..95661e85d8bc2 100644 --- a/cmake/winml_unittests.cmake +++ b/cmake/winml_unittests.cmake @@ -60,6 +60,7 @@ add_dependencies(winml_test_common winml_api winml_dll ) +target_compile_definitions(winml_test_common PRIVATE BUILD_GOOGLE_TEST) set_winml_target_properties(winml_test_common) file(GLOB winml_test_api_src CONFIGURE_DEPENDS "${WINML_TEST_SRC_DIR}/api/*.cpp") @@ -82,6 +83,7 @@ add_winml_test( LIBS winml_test_common ${winml_test_scenario_libs} ) target_precompiled_header(winml_test_scenario testPch.h) +target_compile_definitions(winml_test_scenario PRIVATE BUILD_GOOGLE_TEST) set_target_properties(winml_test_scenario PROPERTIES LINK_FLAGS "/DELAYLOAD:d2d1.dll /DELAYLOAD:d3d11.dll /DELAYLOAD:dxgi.dll" ) diff --git a/winml/test/common/SqueezeNetValidator.cpp b/winml/test/common/SqueezeNetValidator.cpp index e3fc0978ad086..2eacc456cd017 100644 --- a/winml/test/common/SqueezeNetValidator.cpp +++ b/winml/test/common/SqueezeNetValidator.cpp @@ -2,12 +2,11 @@ #include "protobufHelpers.h" #include "fileHelpers.h" #include "core/common/common.h" -#include #include #include #include #include - +#include // using namespace winrt::Windows::Foundation; using namespace winrt::Windows::AI::MachineLearning; using namespace winrt::Windows::Foundation::Collections; @@ -35,12 +34,12 @@ static void BindImage( if (bindAsInspectable) { - EXPECT_NO_THROW(binding.Bind(name, frame)); + WINML_EXPECT_NO_THROW(binding.Bind(name, frame)); } else { auto imagetensor = ImageFeatureValue::CreateFromVideoFrame(frame); - EXPECT_NO_THROW(binding.Bind(name, imagetensor)); + WINML_EXPECT_NO_THROW(binding.Bind(name, imagetensor)); } } @@ -50,15 +49,15 @@ static void BindTensor( ITensor inputTensor, bool bindAsInspectable = false) { - EXPECT_TRUE(inputTensor != nullptr); + 
WINML_EXPECT_TRUE(inputTensor != nullptr); if (bindAsInspectable) { - EXPECT_NO_THROW(binding.Bind(name, inputTensor.as().GetAsVectorView())); + WINML_EXPECT_NO_THROW(binding.Bind(name, inputTensor.as().GetAsVectorView())); } else { - EXPECT_NO_THROW(binding.Bind(name, inputTensor)); + WINML_EXPECT_NO_THROW(binding.Bind(name, inputTensor)); } } @@ -75,11 +74,11 @@ ITensor BindOutput( { case OutputBindingStrategy::Bound: outputTensor = T::Create(shape); - EXPECT_NO_THROW(binding.Bind(name, outputTensor)); + WINML_EXPECT_NO_THROW(binding.Bind(name, outputTensor)); break; case OutputBindingStrategy::Empty: outputTensor = T::Create(); - EXPECT_NO_THROW(binding.Bind(name, outputTensor)); + WINML_EXPECT_NO_THROW(binding.Bind(name, outputTensor)); break; case OutputBindingStrategy::Unbound: __fallthrough; @@ -104,7 +103,7 @@ ImageFeatureValue BindImageOutput( SoftwareBitmap bitmap(BitmapPixelFormat::Bgra8, 720, 720); VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(bitmap); outputTensor = ImageFeatureValue::CreateFromVideoFrame(frame); - EXPECT_NO_THROW(binding.Bind(name, outputTensor)); + WINML_EXPECT_NO_THROW(binding.Bind(name, outputTensor)); break; } case OutputBindingStrategy::Unbound: @@ -136,10 +135,10 @@ void ModelValidator::FnsCandy16( // WinML model creation LearningModel model = nullptr; - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); LearningModelSession modelSession = nullptr; - EXPECT_NO_THROW(modelSession = LearningModelSession(model, LearningModelDevice(deviceKind))); + WINML_EXPECT_NO_THROW(modelSession = LearningModelSession(model, LearningModelDevice(deviceKind))); LearningModelBinding modelBinding(modelSession); auto fullImagePath = modulePath + inputDataImageFileName; @@ -147,7 +146,7 @@ void ModelValidator::FnsCandy16( // create the tensor for the actual output auto output = model.OutputFeatures().First().Current(); - 
EXPECT_TRUE(output.Kind() == LearningModelFeatureKind::Tensor); + WINML_EXPECT_TRUE(output.Kind() == LearningModelFeatureKind::Tensor); auto shape = winrt::single_threaded_vector(std::vector {1, 1}); auto outputTensor = BindImageOutput(outputBindingStrategy, modelBinding, outputDataBindingName); @@ -155,7 +154,7 @@ void ModelValidator::FnsCandy16( // Evaluate the model std::cout << "Calling EvaluateSync on instance" << instance << "\n"; LearningModelEvaluationResult result = nullptr; - EXPECT_NO_THROW(result = modelSession.Evaluate(modelBinding, {})); + WINML_EXPECT_NO_THROW(result = modelSession.Evaluate(modelBinding, {})); // Get results if (outputBindingStrategy == OutputBindingStrategy::Unbound) @@ -167,7 +166,7 @@ void ModelValidator::FnsCandy16( } else { - EXPECT_EQ(result.Outputs().Lookup(outputDataBindingName), outputTensor); + WINML_EXPECT_EQUAL(result.Outputs().Lookup(outputDataBindingName), outputTensor); auto softwareBitmap = outputTensor.VideoFrame().SoftwareBitmap(); @@ -203,10 +202,10 @@ void ModelValidator::SqueezeNet( // WinML model creation LearningModel model = nullptr; - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); LearningModelSession modelSession = nullptr; - EXPECT_NO_THROW(modelSession = LearningModelSession(model, LearningModelDevice(deviceKind))); + WINML_EXPECT_NO_THROW(modelSession = LearningModelSession(model, LearningModelDevice(deviceKind))); LearningModelBinding modelBinding(modelSession); @@ -224,11 +223,11 @@ void ModelValidator::SqueezeNet( // load up the expected output auto expectedResultsTensor = ProtobufHelpers::LoadTensorFromProtobufFile(outputFileName, false); - EXPECT_TRUE(expectedResultsTensor != nullptr); + WINML_EXPECT_TRUE(expectedResultsTensor != nullptr); // create the tensor for the actual output auto output = model.OutputFeatures().First().Current(); - EXPECT_TRUE(output.Kind() == 
LearningModelFeatureKind::Tensor); + WINML_EXPECT_TRUE(output.Kind() == LearningModelFeatureKind::Tensor); auto outputTensor = BindOutput( outputBindingStrategy, modelBinding, outputDataBindingName, expectedResultsTensor.Shape()); @@ -236,7 +235,7 @@ void ModelValidator::SqueezeNet( // Evaluate the model std::cout << "Calling EvaluateSync on instance" << instance << "\n"; LearningModelEvaluationResult result = nullptr; - EXPECT_NO_THROW(result = modelSession.Evaluate(modelBinding, {})); + WINML_EXPECT_NO_THROW(result = modelSession.Evaluate(modelBinding, {})); // Get results if (outputBindingStrategy == OutputBindingStrategy::Unbound) @@ -247,21 +246,22 @@ void ModelValidator::SqueezeNet( } else { - EXPECT_EQ(result.Outputs().Lookup(outputDataBindingName), outputTensor); + WINML_EXPECT_EQUAL(result.Outputs().Lookup(outputDataBindingName), outputTensor); } auto outDataExpected = expectedResultsTensor.as().GetAsVectorView(); auto outDataActual = outputTensor.as().GetAsVectorView(); - EXPECT_TRUE(outDataActual.Size() == outDataExpected.Size()); + WINML_EXPECT_TRUE(outDataActual.Size() == outDataExpected.Size()); for (uint32_t i = 0; i < outDataActual.Size(); i++) { float delta = std::abs(outDataActual.GetAt(i) - outDataExpected.GetAt(i)); if (delta > dataTolerance) { - ADD_FAILURE() << "EXPECTED: " << outDataExpected.GetAt(i) << " , ACTUAL: " << outDataActual.GetAt(i) + std::stringstream ss; + ss << "EXPECTED: " << outDataExpected.GetAt(i) << " , ACTUAL: " << outDataActual.GetAt(i) << "instance " << instance << ", element " << i; - + WINML_LOG_ERROR(ss.str().c_str()); } } } diff --git a/winml/test/common/SqueezeNetValidator.h b/winml/test/common/SqueezeNetValidator.h index 68b44ddb2dd3b..613df41853fbe 100644 --- a/winml/test/common/SqueezeNetValidator.h +++ b/winml/test/common/SqueezeNetValidator.h @@ -6,7 +6,7 @@ #pragma once -#include +#include "std.h" enum OutputBindingStrategy { Bound, Unbound, Empty }; diff --git a/winml/test/common/googleTestMacros.h 
b/winml/test/common/googleTestMacros.h index f7f8799fb7d2f..55fb2f7f740fe 100644 --- a/winml/test/common/googleTestMacros.h +++ b/winml/test/common/googleTestMacros.h @@ -1,27 +1,63 @@ #include +#include "runtimeParameters.h" #define TEST_GROUP_BEGIN(group_name) #define TEST_GROUP_END() -#define WINML_TEST(group_name, test_name, method) \ -static void method(); \ -TEST_F(group_name, test_name) { \ - method(); \ -} +#define WINML_TEST(group_name, test_name) \ + TEST_F(group_name, test_name) { \ + getapi().test_name(); \ + } #define WINML_TEST_CLASS_BEGIN_NO_SETUP(test_class_name) \ - class test_class_name : public ::testing::Test { \ + class test_class_name : public ::testing::Test { \ }; #define WINML_TEST_CLASS_BEGIN_WITH_SETUP(test_class_name, setup_method) \ -static void setup_method(); \ - class test_class_name : public ::testing::Test { \ - protected: \ - void SetUp() override { \ - setup_method(); \ - } \ -}; + class test_class_name : public ::testing::Test { \ + protected: \ + void SetUp() override { \ + getapi().setup_method(); \ + } \ + }; #define WINML_TEST_CLASS_END() -#define WINML_EXPECT_NO_THROW(statement) EXPECT_NO_THROW(statement) \ No newline at end of file +// For old versions of gtest without GTEST_SKIP, stream the message and return success instead +#ifndef GTEST_SKIP +#define GTEST_SKIP_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) +#define GTEST_SKIP GTEST_SKIP_("") +#endif + +#define WINML_SKIP_TEST(message) \ + GTEST_SKIP() << message; + +#define WINML_EXPECT_NO_THROW(statement) EXPECT_NO_THROW(statement) +#define WINML_EXPECT_TRUE(statement) EXPECT_TRUE(statement) +#define WINML_EXPECT_EQUAL(val1, val2) EXPECT_EQ(val1, val2) +#define WINML_EXPECT_NOT_EQUAL(val1, val2) EXPECT_NE(val1, val2) + +#define WINML_LOG_ERROR(message) \ + ADD_FAILURE() << message + +#define WINML_EXPECT_HRESULT_SUCCEEDED(hresult_expression) EXPECT_HRESULT_SUCCEEDED(hresult_expression) +#define 
WINML_EXPECT_HRESULT_FAILED(hresult_expression) EXPECT_HRESULT_FAILED(hresult_expression) +#define WINML_EXPECT_THROW_SPECIFIC(statement, exception, condition) EXPECT_THROW_SPECIFIC(statement, exception, condition) + +#ifndef USE_DML +#define GPUTEST \ + WINML_SKIP_TEST("GPU tests disabled because this is a WinML only build (no DML)") +#else +#define GPUTEST \ + if (auto noGpuTests = RuntimeParameters::Parameters.find("noGPUtests"); \ + noGpuTests != RuntimeParameters::Parameters.end() && noGpuTests->second != "0") { \ + WINML_SKIP_TEST("GPU tests disabled"); \ + } +#endif + +#define SKIP_EDGECORE \ + if (auto isEdgeCore = RuntimeParameters::Parameters.find("EdgeCore"); \ + isEdgeCore != RuntimeParameters::Parameters.end() && isEdgeCore->second != "0") { \ + WINML_SKIP_TEST("Test can't be run in EdgeCore"); \ + } \ No newline at end of file diff --git a/winml/test/common/protobufHelpers.cpp b/winml/test/common/protobufHelpers.cpp index a5bc12a70e4a2..0f3c22fb1c36b 100644 --- a/winml/test/common/protobufHelpers.cpp +++ b/winml/test/common/protobufHelpers.cpp @@ -1,10 +1,11 @@ -#define _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS +#ifndef _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS +#define _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS +#endif // LotusRT #include "core/framework/allocatormgr.h" #include "core/common/logging/logging.h" #include "core/common/logging/sinks/clog_sink.h" - #include "protobufHelpers.h" #pragma warning(push) @@ -12,7 +13,6 @@ #include "onnx/onnx-ml.pb.h" #pragma warning(pop) -#include #include #include "winrt/Windows.Storage.Streams.h" @@ -66,33 +66,35 @@ bool LoadTensorFromPb(onnx::TensorProto& tensor, std::wstring filePath) { template std::vector GetTypeSpecificDataFromTensorProto( - onnx::TensorProto /*tensorProto*/){ + onnx::TensorProto /*tensorProto*/) { static_assert(false, "UNDEFINED! 
TensorProto methods aren't templated, so add a new template specialization."); } template <> std::vector GetTypeSpecificDataFromTensorProto( - onnx::TensorProto tensorProto){ + onnx::TensorProto tensorProto) { return std::vector(std::begin(tensorProto.float_data()), std::end(tensorProto.float_data())); } template <> std::vector GetTypeSpecificDataFromTensorProto( - onnx::TensorProto tensorProto){ + onnx::TensorProto tensorProto) { return std::vector(std::begin(tensorProto.int32_data()), std::end(tensorProto.int32_data())); } template <> std::vector GetTypeSpecificDataFromTensorProto( - onnx::TensorProto tensorProto){ + onnx::TensorProto tensorProto) { return std::vector(std::begin(tensorProto.int64_data()), std::end(tensorProto.int64_data())); } template std::vector GetTensorDataFromTensorProto( - onnx::TensorProto tensorProto, - uint64_t elementCount) { + onnx::TensorProto tensorProto, + uint64_t elementCount) { if (tensorProto.has_raw_data()) { std::vector tensorData; auto& values = tensorProto.raw_data(); - EXPECT_EQ(elementCount, values.size() / sizeof(DataType)) << L"TensorProto elementcount should match raw data buffer size in elements."; + if (elementCount != values.size() / sizeof(DataType)) { + WINML_LOG_ERROR("TensorProto element count should match raw data buffer size in elements."); + } tensorData = std::vector(elementCount); memcpy(tensorData.data(), values.data(), values.size()); @@ -105,7 +107,7 @@ std::vector GetTensorDataFromTensorProto( static std::vector GetTensorStringDataFromTensorProto( onnx::TensorProto tensorProto, uint64_t elementCount) { - EXPECT_EQ(tensorProto.string_data_size(), elementCount); + WINML_EXPECT_EQUAL(tensorProto.string_data_size(), elementCount); auto& values = tensorProto.string_data(); auto returnVector = std::vector(elementCount); std::transform(std::begin(values), std::end(values), std::begin(returnVector), @@ -131,15 +133,15 @@ ITensor ProtobufHelpers::LoadTensorFromProtobufFile( } switch (tensorProto.data_type()) { 
case (onnx::TensorProto::DataType::TensorProto_DataType_FLOAT): - return TensorFloat::CreateFromIterable(tensorShape, GetTensorDataFromTensorProto(tensorProto, elementCount)); + return TensorFloat::CreateFromIterable(tensorShape, GetTensorDataFromTensorProto(tensorProto, elementCount)); case (onnx::TensorProto::DataType::TensorProto_DataType_INT32): - return TensorInt32Bit::CreateFromIterable(tensorShape, GetTensorDataFromTensorProto(tensorProto, elementCount)); + return TensorInt32Bit::CreateFromIterable(tensorShape, GetTensorDataFromTensorProto(tensorProto, elementCount)); case (onnx::TensorProto::DataType::TensorProto_DataType_INT64): - return TensorInt64Bit::CreateFromIterable(tensorShape, GetTensorDataFromTensorProto(tensorProto, elementCount)); + return TensorInt64Bit::CreateFromIterable(tensorShape, GetTensorDataFromTensorProto(tensorProto, elementCount)); case (onnx::TensorProto::DataType::TensorProto_DataType_STRING): return TensorString::CreateFromIterable(tensorShape, GetTensorStringDataFromTensorProto(tensorProto, elementCount)); default: - ADD_FAILURE() << L"Tensor type for creating tensor from protobuf file not supported."; + WINML_LOG_ERROR("Tensor type for creating tensor from protobuf file not supported."); break; } } @@ -152,7 +154,7 @@ TensorFloat16Bit ProtobufHelpers::LoadTensorFloat16FromProtobufFile( onnx::TensorProto tensorProto; if (LoadTensorFromPb(tensorProto, filePath)) { if (tensorProto.has_data_type()) { - EXPECT_EQ(onnx::TensorProto::DataType::TensorProto_DataType_FLOAT16, tensorProto.data_type()); + WINML_EXPECT_EQUAL(onnx::TensorProto::DataType::TensorProto_DataType_FLOAT16, tensorProto.data_type()); } else { std::cerr << "Loading unknown TensorProto datatype as TensorFloat16Bit.\n"; } @@ -166,7 +168,10 @@ TensorFloat16Bit ProtobufHelpers::LoadTensorFloat16FromProtobufFile( uint32_t sizeInBytes; spTensorValueNative->GetBuffer(reinterpret_cast(&data), &sizeInBytes); - EXPECT_TRUE(tensorProto.has_raw_data()) << L"Float16 tensor proto 
buffers are expected to contain raw data."; + if (!tensorProto.has_raw_data()) + { + WINML_LOG_ERROR("Float16 tensor proto buffers are expected to contain raw data."); + } auto& raw_data = tensorProto.raw_data(); auto buff = raw_data.c_str(); diff --git a/winml/test/common/runtimeParameters.h b/winml/test/common/runtimeParameters.h index 6b0edc7a9cc8b..a98e6c5e283a0 100644 --- a/winml/test/common/runtimeParameters.h +++ b/winml/test/common/runtimeParameters.h @@ -6,4 +6,4 @@ namespace RuntimeParameters { // Runtime parameters passed through CLI arguments extern std::unordered_map Parameters; -} +} \ No newline at end of file diff --git a/winml/test/common/std.h b/winml/test/common/std.h index a2ea803ab70a9..162915abd02fd 100644 --- a/winml/test/common/std.h +++ b/winml/test/common/std.h @@ -19,7 +19,7 @@ #include #include -#include +#include "test.h" // IUnknown must be declared before winrt/base.h is included to light up support for native COM // interfaces with C++/WinRT types (e.g. winrt::com_ptr). 
@@ -31,8 +31,6 @@ // WinML #include "Windows.AI.MachineLearning.Native.h" -#include "runtimeParameters.h" - #define EXPECT_THROW_SPECIFIC(statement, exception, condition) \ EXPECT_THROW( \ try { \ @@ -43,35 +41,7 @@ } \ , exception); -// For old versions of gtest without GTEST_SKIP, stream the message and return success instead -#ifndef GTEST_SKIP -#define GTEST_SKIP_(message) \ - return GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) -#define GTEST_SKIP GTEST_SKIP_("") -#endif - #ifndef INSTANTIATE_TEST_SUITE_P // Use the old name, removed in newer versions of googletest #define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P -#endif - - -#ifndef USE_DML -#define GPUTEST \ - GTEST_SKIP() << "GPU tests disabled because this is a WinML only build (no DML)"; -#else -#define GPUTEST \ - if (auto noGpuTests = RuntimeParameters::Parameters.find("noGPUtests"); \ - noGpuTests != RuntimeParameters::Parameters.end() && noGpuTests->second != "0") \ - { \ - GTEST_SKIP() << "GPU tests disabled"; \ - } -#endif - - -#define SKIP_EDGECORE \ - if (auto isEdgeCore = RuntimeParameters::Parameters.find("EdgeCore"); \ - isEdgeCore != RuntimeParameters::Parameters.end() && isEdgeCore->second != "0") \ - { \ - GTEST_SKIP() << "Test can't be run in EdgeCore"; \ - } +#endif \ No newline at end of file diff --git a/winml/test/common/taefTestMacros.h b/winml/test/common/taefTestMacros.h index e69de29bb2d1d..7c7636b7a4dda 100644 --- a/winml/test/common/taefTestMacros.h +++ b/winml/test/common/taefTestMacros.h @@ -0,0 +1,59 @@ +#include "WexTestClass.h" + +using namespace WEX::Logging; +using namespace WEX::Common; +using namespace WEX::TestExecution; + +#define WINML_EXPECT_NO_THROW(statement) VERIFY_NO_THROW(statement) + +#define WINML_TEST_CLASS_BEGIN_WITH_SETUP(test_class_name, setup_method) \ + class test_class_name { \ + TEST_CLASS(test_class_name); \ + TEST_CLASS_SETUP(TestClassSetup) { \ + getapi().setup_method(); \ + return true; \ + } + +#define WINML_TEST_CLASS_END() 
\ + } \ + ; + +#define WINML_TEST(group_name, test_name) \ + TEST_METHOD(test_name) { \ + getapi().test_name(); \ + } + +#define WINML_SKIP_TEST(message) \ + Log::Result(TestResults::Skipped, \ + std::wstring_convert>().from_bytes(message).c_str()); \ + return; + +#define WINML_EXPECT_NO_THROW(statement) VERIFY_NO_THROW(statement) +#define WINML_EXPECT_TRUE(statement) VERIFY_IS_TRUE(statement) +#define WINML_EXPECT_EQUAL(val1, val2) VERIFY_ARE_EQUAL(val1, val2) +#define WINML_EXPECT_NOT_EQUAL(val1, val2) VERIFY_ARE_NOT_EQUAL(val1, val2) +#define WINML_LOG_ERROR(message) \ + VERIFY_FAIL(std::wstring_convert>().from_bytes(message).c_str()) + +#define WINML_EXPECT_HRESULT_SUCCEEDED(hresult_expression) VERIFY_SUCCEEDED(hresult_expression) +#define WINML_EXPECT_THROW_SPECIFIC(statement, exception, condition) VERIFY_THROWS_SPECIFIC(statement, exception, condition) +#define WINML_EXPECT_HRESULT_FAILED(hresult_expression) VERIFY_FAILED(hresult_expression) + +#ifndef USE_DML +#define GPUTEST \ + WINML_SKIP_TEST("GPU tests disabled because this is a WinML only build (no DML)") +#else +#define GPUTEST \ + bool noGPUTests; \ + if (SUCCEEDED(RuntimeParameters::TryGetValue(L"noGPUtests", noGPUTests)) && noGPUTests) { \ + WINML_SKIP_TEST("This test is disabled by the noGPUTests runtime parameter."); \ + return; \ + } +#endif + +#define SKIP_EDGECORE \ + bool edgeCoreRun; \ + if (SUCCEEDED(RuntimeParameters::TryGetValue(L"EdgeCore", edgeCoreRun)) && edgeCoreRun) { \ + WINML_SKIP_TEST("This test is disabled by the EdgeCore runtime parameter."); \ + return; \ + } \ No newline at end of file diff --git a/winml/test/common/test.h b/winml/test/common/test.h index 6f5306fd50e85..f3376d4bd6ed1 100644 --- a/winml/test/common/test.h +++ b/winml/test/common/test.h @@ -1,6 +1,7 @@ -#define BUILD_GOOGLE_TEST #ifdef BUILD_GOOGLE_TEST #include "googleTestMacros.h" -#elif BUILD_TAEF_TEST +#else +#ifdef BUILD_TAEF_TEST #include "taefTestMacros.h" +#endif #endif \ No newline at end of file diff 
--git a/winml/test/common/testPch.h b/winml/test/common/testPch.h index 952f0bbccf6a1..4bc156fa0c1c2 100644 --- a/winml/test/common/testPch.h +++ b/winml/test/common/testPch.h @@ -3,7 +3,9 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // //----------------------------------------------------------------------------- +#ifndef _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS #define _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS +#endif #include "std.h" #include diff --git a/winml/test/scenario/cppwinrt/CustomNullOp.h b/winml/test/scenario/cppwinrt/CustomNullOp.h index d7dfdf75ac488..7d2e5efa899f4 100644 --- a/winml/test/scenario/cppwinrt/CustomNullOp.h +++ b/winml/test/scenario/cppwinrt/CustomNullOp.h @@ -4,14 +4,14 @@ #pragma once -#include +#include "test.h" template struct NullShapeInferrer : winrt::implements, IMLOperatorShapeInferrer> { STDMETHOD(InferOutputShapes)(IMLOperatorShapeInferenceContext* context) noexcept { - EXPECT_NO_THROW(OperatorHelper::ShapeInferenceFunction(context)); + WINML_EXPECT_NO_THROW(OperatorHelper::ShapeInferenceFunction(context)); return S_OK; } }; @@ -23,7 +23,7 @@ struct NullOperator : winrt::implements STDMETHOD(Compute)(IMLOperatorKernelContext* context) { winrt::com_ptr outputTensor; - EXPECT_HRESULT_SUCCEEDED(context->GetOutputTensor(0, outputTensor.put())); + WINML_EXPECT_HRESULT_SUCCEEDED(context->GetOutputTensor(0, outputTensor.put())); ++(*m_callCount); return S_OK; @@ -92,7 +92,7 @@ struct NullOperatorFactory : winrt::implements(callCount); - EXPECT_HRESULT_SUCCEEDED(registry->RegisterOperatorKernel( + WINML_EXPECT_HRESULT_SUCCEEDED(registry->RegisterOperatorKernel( &kernelDescription, factory.get(), shapeInferrer.get() diff --git a/winml/test/scenario/cppwinrt/CustomOperatorProvider.h b/winml/test/scenario/cppwinrt/CustomOperatorProvider.h index 215cd4c4f29b8..698eeeb396095 100644 --- a/winml/test/scenario/cppwinrt/CustomOperatorProvider.h +++ b/winml/test/scenario/cppwinrt/CustomOperatorProvider.h @@ -54,4 +54,4 @@ 
struct CustomOperatorProvider : m_registry.copy_to(ppOperatorRegistry); return S_OK; } -}; +}; \ No newline at end of file diff --git a/winml/test/scenario/cppwinrt/CustomOps.cpp b/winml/test/scenario/cppwinrt/CustomOps.cpp index a9ab916fbc1bd..eb47843b95de6 100644 --- a/winml/test/scenario/cppwinrt/CustomOps.cpp +++ b/winml/test/scenario/cppwinrt/CustomOps.cpp @@ -12,7 +12,7 @@ #include #include #include "CustomOperatorProvider.h" -#include "runtimeParameters.h" +#include "CustomOps.h" // For custom operator and shape inferencing support #include "core/providers/dml/DmlExecutionProvider/inc/MLOperatorAuthor.h" @@ -31,26 +31,19 @@ using namespace winrt::Windows::Graphics::Imaging; using namespace winrt::Windows::Storage; using namespace winrt::Windows::Storage::Streams; -class CustomOpsScenarioTest : public ::testing::Test +static void CustomOpsScenarioTestSetup() { -protected: - CustomOpsScenarioTest() { - init_apartment(); - } -}; + init_apartment(); +} -class CustomOpsScenarioGpuTest : public CustomOpsScenarioTest +static void CustomOpsScenarioGpuTestSetup() { -protected: - void SetUp() override - { - GPUTEST - } -}; + init_apartment(); + GPUTEST; +} // Tests that the execution provider correctly fuses operators together when custom ops are involved. 
-TEST_F(CustomOpsScenarioGpuTest, CustomOperatorFusion) -{ +static void CustomOperatorFusion() { constexpr const wchar_t* c_modelFilename = L"squeezenet_tensor_input.onnx"; // This particular model has 25 Conv ops and 25 Relu ops, all of which are eligible for fusion so we expect them @@ -96,7 +89,7 @@ TEST_F(CustomOpsScenarioGpuTest, CustomOperatorFusion) { using namespace OperatorHelper; - EXPECT_HRESULT_SUCCEEDED(MLCreateOperatorRegistry(m_registry.put())); + WINML_EXPECT_HRESULT_SUCCEEDED(MLCreateOperatorRegistry(m_registry.put())); #pragma push_macro("REGISTER_KERNEL") #define REGISTER_KERNEL(_name, _domain, _opSet, _shapeInferrer, _callCount) \ @@ -143,14 +136,14 @@ TEST_F(CustomOpsScenarioGpuTest, CustomOperatorFusion) auto provider = customOperatorProvider.as(); LearningModelDevice device = nullptr; - EXPECT_NO_THROW(device = LearningModelDevice(LearningModelDeviceKind::DirectX)); + WINML_EXPECT_NO_THROW(device = LearningModelDevice(LearningModelDeviceKind::DirectX)); std::wstring fullPath = FileHelpers::GetModulePath() + c_modelFilename; auto model = LearningModel::LoadFromFilePath(fullPath, provider); auto featureValue = FileHelpers::LoadImageFeatureValue(L"227x227.png"); LearningModelSession session = nullptr; - EXPECT_NO_THROW(session = LearningModelSession(model, device)); + WINML_EXPECT_NO_THROW(session = LearningModelSession(model, device)); LearningModelBinding modelBinding(session); modelBinding.Bind(L"data", featureValue); @@ -159,15 +152,15 @@ TEST_F(CustomOpsScenarioGpuTest, CustomOperatorFusion) const auto& callCounts = customOperatorProvider.as()->GetCallCounts(); // Verify that the correct number of each operator was seen (i.e. 
that none were dropped / incorrectly fused) - EXPECT_EQ(c_expectedConvOps, callCounts.conv); - EXPECT_EQ(c_expectedReluOps, callCounts.relu); - EXPECT_EQ(c_expectedFusedConvOps, callCounts.fusedConv); - EXPECT_EQ(c_expectedGemmOps, callCounts.gemm); - EXPECT_EQ(c_expectedSigmoidOps, callCounts.sigmoid); - EXPECT_EQ(c_expectedFusedGemmOps, callCounts.fusedGemm); - EXPECT_EQ(c_expectedBatchNormOps, callCounts.batchNorm); - EXPECT_EQ(c_expectedMaxPoolOps, callCounts.maxPool); - EXPECT_EQ(c_expectedConcatOps, callCounts.concat); + WINML_EXPECT_EQUAL(c_expectedConvOps, callCounts.conv); + WINML_EXPECT_EQUAL(c_expectedReluOps, callCounts.relu); + WINML_EXPECT_EQUAL(c_expectedFusedConvOps, callCounts.fusedConv); + WINML_EXPECT_EQUAL(c_expectedGemmOps, callCounts.gemm); + WINML_EXPECT_EQUAL(c_expectedSigmoidOps, callCounts.sigmoid); + WINML_EXPECT_EQUAL(c_expectedFusedGemmOps, callCounts.fusedGemm); + WINML_EXPECT_EQUAL(c_expectedBatchNormOps, callCounts.batchNorm); + WINML_EXPECT_EQUAL(c_expectedMaxPoolOps, callCounts.maxPool); + WINML_EXPECT_EQUAL(c_expectedConcatOps, callCounts.concat); } struct LocalCustomOperatorProvider : @@ -178,7 +171,7 @@ struct LocalCustomOperatorProvider : { LocalCustomOperatorProvider() { - EXPECT_HRESULT_SUCCEEDED(MLCreateOperatorRegistry(m_registry.put())); + WINML_EXPECT_HRESULT_SUCCEEDED(MLCreateOperatorRegistry(m_registry.put())); } STDMETHOD(GetRegistry)(IMLOperatorRegistry** ppOperatorRegistry) @@ -205,20 +198,20 @@ struct LocalCustomOperatorProvider : void VerifyTestAttributes(const MLOperatorAttributes& attrs) { std::string strAttr = attrs.GetAttribute("DefaultedNonRequiredString"); - EXPECT_EQ(strAttr, "1"); + WINML_EXPECT_EQUAL(strAttr, "1"); std::vector strArrayAttr = attrs.GetAttributeVector("DefaultedNonRequiredStringArray"); std::vector expected = std::vector({ "1", "2" }); for (size_t i = 0; i < expected.size(); ++i) { - EXPECT_EQ(strArrayAttr[i], expected[i]); + WINML_EXPECT_EQUAL(strArrayAttr[i], expected[i]); } - EXPECT_EQ(1, 
attrs.GetAttribute("DefaultedNonRequiredInt")); - EXPECT_EQ(1.0f, attrs.GetAttribute("DefaultedNonRequiredFloat")); + WINML_EXPECT_EQUAL(1, attrs.GetAttribute("DefaultedNonRequiredInt")); + WINML_EXPECT_EQUAL(1.0f, attrs.GetAttribute("DefaultedNonRequiredFloat")); - EXPECT_EQ(std::vector({ 1, 2 }), attrs.GetAttributeVector("DefaultedNonRequiredIntArray")); - EXPECT_EQ(std::vector({ 1.0f, 2.0f }), attrs.GetAttributeVector("DefaultedNonRequiredFloatArray")); + WINML_EXPECT_EQUAL(std::vector({ 1, 2 }), attrs.GetAttributeVector("DefaultedNonRequiredIntArray")); + WINML_EXPECT_EQUAL(std::vector({ 1.0f, 2.0f }), attrs.GetAttributeVector("DefaultedNonRequiredFloatArray")); } // Foo kernel which is doing Add and optionally truncates its output @@ -241,14 +234,14 @@ class FooKernel if (!Truncate) { com_ptr shapeInfo; - EXPECT_EQ(info.GetInterface()->HasTensorShapeDescription(), false); - EXPECT_HRESULT_FAILED(info.GetInterface()->GetTensorShapeDescription(shapeInfo.put())); + WINML_EXPECT_EQUAL(info.GetInterface()->HasTensorShapeDescription(), false); + WINML_EXPECT_HRESULT_FAILED(info.GetInterface()->GetTensorShapeDescription(shapeInfo.put())); } else { com_ptr shapeInfo; - EXPECT_EQ(info.GetInterface()->HasTensorShapeDescription(), true); - EXPECT_EQ(info.GetInterface()->GetTensorShapeDescription(shapeInfo.put()), S_OK); + WINML_EXPECT_EQUAL(info.GetInterface()->HasTensorShapeDescription(), true); + WINML_EXPECT_EQUAL(info.GetInterface()->GetTensorShapeDescription(shapeInfo.put()), S_OK); } } @@ -271,7 +264,7 @@ class FooKernel if (!Truncate) { com_ptr tensor; - EXPECT_HRESULT_FAILED(context.GetInterface()->GetOutputTensor(0, tensor.put())); + WINML_EXPECT_HRESULT_FAILED(context.GetInterface()->GetOutputTensor(0, tensor.put())); } else { @@ -308,7 +301,7 @@ void CreateTruncatedABIFooKernel(IMLOperatorKernelCreationContext* kernelInfo, I } // Test using a foo kernel which is doing Add, but register it as "Mul". 
-TEST_F(CustomOpsScenarioTest, CustomKernelWithBuiltInSchema) +static void CustomKernelWithBuiltInSchema() { // Create the registry auto operatorProvider = winrt::make(); @@ -337,7 +330,7 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithBuiltInSchema) }; Microsoft::WRL::ComPtr factory = wil::MakeOrThrow(CreateABIFooKernel); - EXPECT_HRESULT_SUCCEEDED(registry->RegisterOperatorKernel(&kernelDesc, factory.Get(), nullptr)); + WINML_EXPECT_HRESULT_SUCCEEDED(registry->RegisterOperatorKernel(&kernelDesc, factory.Get(), nullptr)); // Prepare inputs std::vector dimsX = { 3, 2 }; @@ -361,23 +354,23 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithBuiltInSchema) bindings.Bind(winrt::hstring(L"X"), inputTensor); auto outputValue = TensorFloat::Create(); - EXPECT_NO_THROW(bindings.Bind(L"Y", outputValue)); + WINML_EXPECT_NO_THROW(bindings.Bind(L"Y", outputValue)); // Evaluate the model hstring correlationId; - EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); + WINML_EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); // Check the result shape - EXPECT_EQ(expectedDimsY.size(), outputValue.Shape().Size()); + WINML_EXPECT_EQUAL(expectedDimsY.size(), outputValue.Shape().Size()); for (uint32_t j = 0; j < outputValue.Shape().Size(); j++) { - EXPECT_EQ(expectedDimsY.at(j), outputValue.Shape().GetAt(j)); + WINML_EXPECT_EQUAL(expectedDimsY.at(j), outputValue.Shape().GetAt(j)); } // Check the results auto buffer = outputValue.GetAsVectorView(); - EXPECT_TRUE(buffer != nullptr); - EXPECT_TRUE(std::equal(expectedValuesY.cbegin(), expectedValuesY.cend(), begin(buffer))); + WINML_EXPECT_TRUE(buffer != nullptr); + WINML_EXPECT_TRUE(std::equal(expectedValuesY.cbegin(), expectedValuesY.cend(), begin(buffer))); // Release the model before operatorProvider goes out of scope model = nullptr; @@ -404,7 +397,7 @@ class MLOperatorShapeInferrerFromFunc : public Microsoft::WRL::RuntimeClass< }; // Test using a custom kernel and schema, while verifying attribute defaults, type 
mapping, and inference methods -TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) +static void CustomKernelWithCustomSchema() { // Test cases struct @@ -591,7 +584,7 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) // Register the schema MLOperatorSetId opsetId = { "", 7 }; MLOperatorSchemaDescription* opSchemaDescs = &schemaDesc; - EXPECT_EQ(S_OK, registry->RegisterOperatorSetSchema( + WINML_EXPECT_EQUAL(S_OK, registry->RegisterOperatorSetSchema( &opsetId, 1, &opSchemaDescs, @@ -608,7 +601,7 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) MLOperatorSetId id = { "", 9 }; MLOperatorSchemaDescription* schemaDescs = &futureSchemaDesc; - EXPECT_EQ(S_OK, registry->RegisterOperatorSetSchema( + WINML_EXPECT_EQUAL(S_OK, registry->RegisterOperatorSetSchema( &id, 7, &schemaDescs, @@ -624,7 +617,7 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) MLOperatorSetId id = { "otherDomain", 7 }; MLOperatorSchemaDescription* schemaDescs = &otherSchemaDesc; - EXPECT_EQ(S_OK, registry->RegisterOperatorSetSchema( + WINML_EXPECT_EQUAL(S_OK, registry->RegisterOperatorSetSchema( &id, 1, &schemaDescs, @@ -661,12 +654,12 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) kernelDesc.options = MLOperatorKernelOptions::AllowDynamicInputShapes; Microsoft::WRL::ComPtr factory = wil::MakeOrThrow(CreateABIFooKernel); - EXPECT_EQ(S_OK, registry->RegisterOperatorKernel(&kernelDesc, factory.Get(), nullptr)); + WINML_EXPECT_EQUAL(S_OK, registry->RegisterOperatorKernel(&kernelDesc, factory.Get(), nullptr)); } else { Microsoft::WRL::ComPtr factory = wil::MakeOrThrow(CreateTruncatedABIFooKernel); - EXPECT_EQ(S_OK, registry->RegisterOperatorKernel( + WINML_EXPECT_EQUAL(S_OK, registry->RegisterOperatorKernel( &kernelDesc, factory.Get(), testCases[caseIndex].useShapeInferenceInKernel ? 
shapeInferrer.Get() : nullptr @@ -699,23 +692,23 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) bindings.Bind(winrt::hstring(L"X"), inputTensor); auto outputValue = TensorFloat::Create(); - EXPECT_NO_THROW(bindings.Bind(L"Y", outputValue)); + WINML_EXPECT_NO_THROW(bindings.Bind(L"Y", outputValue)); // Evaluate the model hstring correlationId; - EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); + WINML_EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); // Verify the result shape - EXPECT_EQ(expectedDimsY.size(), outputValue.Shape().Size()); + WINML_EXPECT_EQUAL(expectedDimsY.size(), outputValue.Shape().Size()); for (uint32_t j = 0; j < outputValue.Shape().Size(); j++) { - EXPECT_EQ(expectedDimsY.at(j), outputValue.Shape().GetAt(j)); + WINML_EXPECT_EQUAL(expectedDimsY.at(j), outputValue.Shape().GetAt(j)); } // Verify the result values auto buffer = outputValue.GetAsVectorView(); - EXPECT_TRUE(buffer != nullptr); - EXPECT_TRUE(std::equal(expectedValuesY.cbegin(), expectedValuesY.cend(), begin(buffer))); + WINML_EXPECT_TRUE(buffer != nullptr); + WINML_EXPECT_TRUE(std::equal(expectedValuesY.cbegin(), expectedValuesY.cend(), begin(buffer))); // Release the model before operatorProvider goes out of scope model = nullptr; @@ -724,7 +717,19 @@ TEST_F(CustomOpsScenarioTest, CustomKernelWithCustomSchema) { // Check that the shape inference context is closed and safely fails MLOperatorEdgeDescription edgeDesc; - EXPECT_EQ(E_INVALIDARG, shapeInferenceContext->GetInputEdgeDescription(0, &edgeDesc)); + WINML_EXPECT_EQUAL(E_INVALIDARG, shapeInferenceContext->GetInputEdgeDescription(0, &edgeDesc)); } } } + +const CustomOpsTestApi& getapi() { + static constexpr CustomOpsTestApi api = + { + CustomOpsScenarioTestSetup, + CustomOpsScenarioGpuTestSetup, + CustomOperatorFusion, + CustomKernelWithBuiltInSchema, + CustomKernelWithCustomSchema + }; + return api; +} \ No newline at end of file diff --git a/winml/test/scenario/cppwinrt/CustomOps.h 
b/winml/test/scenario/cppwinrt/CustomOps.h new file mode 100644 index 0000000000000..d0a8a3efd108c --- /dev/null +++ b/winml/test/scenario/cppwinrt/CustomOps.h @@ -0,0 +1,21 @@ +#include "test.h" +using VoidTest = void(*)(); +using SetupTest = VoidTest; +struct CustomOpsTestApi +{ + SetupTest CustomOpsScenarioTestSetup; + SetupTest CustomOpsScenarioGpuTestSetup; + VoidTest CustomOperatorFusion; + VoidTest CustomKernelWithBuiltInSchema; + VoidTest CustomKernelWithCustomSchema; +}; +const CustomOpsTestApi& getapi(); + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(CustomOpsScenarioTest, CustomOpsScenarioTestSetup) +WINML_TEST(CustomOpsScenarioTest, CustomKernelWithBuiltInSchema) +WINML_TEST(CustomOpsScenarioTest, CustomKernelWithCustomSchema) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(CustomOpsScenarioGpuTest, CustomOpsScenarioGpuTestSetup) +WINML_TEST(CustomOpsScenarioGpuTest, CustomOperatorFusion) +WINML_TEST_CLASS_END() \ No newline at end of file diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp index 8b82934f3ffc4..1420b0c05e425 100644 --- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.cpp @@ -19,7 +19,6 @@ #include "DeviceHelpers.h" #include "filehelpers.h" #include "robuffer.h" -#include "runtimeParameters.h" #include "Windows.AI.MachineLearning.Native.h" #include "Windows.Graphics.DirectX.Direct3D11.interop.h" #include "windows.ui.xaml.media.dxinterop.h" @@ -30,6 +29,7 @@ #include #include #include "scenariotestscppwinrt.h" +#include #if __has_include("dxcore.h") #define ENABLE_DXCORE 1 #endif @@ -62,7 +62,7 @@ static void ScenarioCppWinrtGpuSkipEdgeCoreTestSetup() { SKIP_EDGECORE }; -static void ScenarioCppWinrtTest_Sample1() { +static void Sample1() { LearningModel model = nullptr; std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; WINML_EXPECT_NO_THROW(model = 
LearningModel::LoadFromFilePath(filePath)); @@ -140,7 +140,7 @@ static void BindFeatures(LearningModelBinding binding, IVectorView DoEva return session.EvaluateAsync(binding, L""); } -static void ScenarioCppWinrtTest_Scenario5AsyncEval() { +static void Scenario5AsyncEval() { auto task = DoEvalAsync(); while (task.Status() == winrt::Windows::Foundation::AsyncStatus::Started) { @@ -233,7 +233,7 @@ static void ScenarioCppWinrtTest_Scenario5AsyncEval() { //! Scenario6: use BindInputWithProperties - BitmapBounds, BitmapPixelFormat // apparently this scenario is cut for rs5. - not cut, just rewprked. move props // to the image value when that is checked in. -static void ScenarioCppWinrtGpuTest_Scenario6BindWithProperties() { +static void Scenario6BindWithProperties() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -277,7 +277,7 @@ static void ScenarioCppWinrtGpuTest_Scenario6BindWithProperties() { } //! Scenario7: run eval without creating a binding object -static void ScenarioCppWinrtTest_Scenario7EvalWithNoBind() { +static void Scenario7EvalWithNoBind() { auto map = winrt::single_threaded_map(); // load a model @@ -297,7 +297,7 @@ static void ScenarioCppWinrtTest_Scenario7EvalWithNoBind() { //! 
Scenario8: choose which device to run the model on - PreferredDeviceType, PreferredDevicePerformance, SetDeviceFromSurface, SetDevice // create a session on the default device -static void ScenarioCppWinrtTest_Scenario8SetDeviceSampleDefault() { +static void Scenario8SetDeviceSampleDefault() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -307,7 +307,7 @@ static void ScenarioCppWinrtTest_Scenario8SetDeviceSampleDefault() { } // create a session on the CPU device -static void ScenarioCppWinrtTest_Scenario8SetDeviceSampleCPU() { +static void Scenario8SetDeviceSampleCPU() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -317,7 +317,7 @@ static void ScenarioCppWinrtTest_Scenario8SetDeviceSampleCPU() { } // create a session on the default DML device -static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleDefaultDirectX() { +static void Scenario8SetDeviceSampleDefaultDirectX() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -327,7 +327,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleDefaultDirectX() { } // create a session on the DML device that provides best power -static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMinPower() { +static void Scenario8SetDeviceSampleMinPower() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -337,7 +337,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMinPower() { } // create a session on the DML device that provides best perf -static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMaxPerf() { +static void Scenario8SetDeviceSampleMaxPerf() { // load a model 
std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -347,7 +347,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMaxPerf() { } // create a session on the same device my camera is on -static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMyCameraDevice() { +static void Scenario8SetDeviceSampleMyCameraDevice() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -369,12 +369,12 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMyCameraDevice() { LearningModelDevice dmlDeviceCamera = LearningModelDevice::CreateFromDirect3D11Device(direct3D11Device); LearningModelSession dmlSessionCamera(model, dmlDeviceCamera); } else { - GTEST_SKIP() << "Test skipped because video capture device is missing"; + WINML_SKIP_TEST("Test skipped because video capture device is missing"); } } // create a device from D3D11 Device -static void ScenarioCppWinrtGpuSkipEdgeCoreTest_Scenario8SetDeviceSampleD3D11Device() { +static void Scenario8SetDeviceSampleD3D11Device() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -386,7 +386,7 @@ static void ScenarioCppWinrtGpuSkipEdgeCoreTest_Scenario8SetDeviceSampleD3D11Dev nullptr, D3D_DRIVER_TYPE::D3D_DRIVER_TYPE_HARDWARE, nullptr, 0, nullptr, 0, D3D11_SDK_VERSION, pD3D11Device.put(), &fl, pContext.put()); if (FAILED(result)) { - GTEST_SKIP() << "Test skipped because d3d11 device is missing"; + WINML_SKIP_TEST("Test skipped because d3d11 device is missing"); } // get dxgiDevice from d3ddevice @@ -402,7 +402,7 @@ static void ScenarioCppWinrtGpuSkipEdgeCoreTest_Scenario8SetDeviceSampleD3D11Dev } // create a session on the a specific dx device that I chose some other way , note we have to use native interop here and pass a cmd 
queue -static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue() { +static void Scenario8SetDeviceSampleCustomCommandQueue() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -410,7 +410,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue() com_ptr pD3D12Device = nullptr; DeviceHelpers::AdapterEnumerationSupport support; if (FAILED(DeviceHelpers::GetAdapterEnumerationSupport(&support))) { - FAIL() << "Unable to load DXGI or DXCore"; + WINML_LOG_ERROR("Unable to load DXGI or DXCore"); return; } HRESULT result = S_OK; @@ -432,7 +432,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue() #endif if (FAILED(result)) { - GTEST_SKIP() << "Test skipped because d3d12 device is missing"; + WINML_SKIP_TEST("Test skipped because d3d12 device is missing"); return; } com_ptr dxQueue = nullptr; @@ -449,7 +449,7 @@ static void ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue() } //pass a Tensor in as an input GPU -static void ScenarioCppWinrtGpuTest_Scenario9LoadBindEvalInputTensorGPU() { +static void Scenario9LoadBindEvalInputTensorGPU() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -523,14 +523,14 @@ static void ScenarioCppWinrtGpuTest_Scenario9LoadBindEvalInputTensorGPU() { // Testing GetAsD3D12Resource com_ptr pReturnedResource; input1imagetensor.as()->GetD3D12Resource(pReturnedResource.put()); - EXPECT_EQ(pReturnedResource.get(), pGPUResource.get()); + WINML_EXPECT_EQUAL(pReturnedResource.get(), pGPUResource.get()); // Evaluate the model winrt::hstring correlationId; dmlSessionCustom.EvaluateAsync(modelBinding, correlationId).get(); } -static void ScenarioCppWinrtGpuTest_Scenario13SingleModelOnCPUandGPU() { +static void Scenario13SingleModelOnCPUandGPU() { 
std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); LearningModelSession cpuSession(model, LearningModelDevice(LearningModelDeviceKind::Cpu)); @@ -555,7 +555,7 @@ static void ScenarioCppWinrtGpuTest_Scenario13SingleModelOnCPUandGPU() { } // Validates when binding input image with free dimensions, the binding step is executed correctly. -static void ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsTensor() { +static void Scenario11FreeDimensionsTensor() { std::wstring filePath = FileHelpers::GetModulePath() + L"free_dimensional_image_input.onnx"; // load a model with expected input size: -1 x -1 auto model = LearningModel::LoadFromFilePath(filePath); @@ -573,7 +573,7 @@ static void ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsTensor() { session.Evaluate(binding, L""); } -static void ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsImage() { +static void Scenario11FreeDimensionsImage() { std::wstring filePath = FileHelpers::GetModulePath() + L"free_dimensional_imageDes.onnx"; // load a model with expected input size: -1 x -1 auto model = LearningModel::LoadFromFilePath(filePath); @@ -619,7 +619,7 @@ void SubmitEval(LearningModel model, SwapChainEntry* sessionBindings, int swapch } //Scenario14:Load single model, run it mutliple times on a single gpu device using a fast swapchain pattern -static void ScenarioCppWinrtGpuTest_Scenario14RunModelSwapchain() { +static void Scenario14RunModelSwapchain() { const int swapchainentrycount = 3; SwapChainEntry sessionBindings[swapchainentrycount]; @@ -672,11 +672,11 @@ static void LoadBindEval_CustomOperator_CPU(const wchar_t* fileName) { WINML_EXPECT_NO_THROW(session.Evaluate(bindings, correlationId)); auto buffer = outputValue.GetAsVectorView(); - EXPECT_TRUE(buffer != nullptr); + WINML_EXPECT_TRUE(buffer != nullptr); } //! 
Scenario17 : Control the dev diagnostics features of WinML Tracing -static void ScenarioCppWinrtTest_Scenario17DevDiagnostics() { +static void Scenario17DevDiagnostics() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -701,21 +701,20 @@ static void ScenarioCppWinrtTest_Scenario17DevDiagnostics() { * even though CPU custom ops shouldn't be dependent on GPU functionality. * These should be reclassed to ScenarioCppWinrt once the DML code is decoupled from the custom op code. **/ - // create a session that loads a model with a branch new operator, register the custom operator, and load/bind/eval -static void ScenarioCppWinrtGpuTest_Scenario20aLoadBindEvalCustomOperatorCPU() { +static void Scenario20aLoadBindEvalCustomOperatorCPU() { std::wstring filePath = FileHelpers::GetModulePath() + L"noisy_relu.onnx"; LoadBindEval_CustomOperator_CPU(filePath.c_str()); } // create a session that loads a model with an overridden operator, register the replacement custom operator, and load/bind/eval -static void ScenarioCppWinrtGpuTest_Scenario20bLoadBindEvalReplacementCustomOperatorCPU() { +static void Scenario20bLoadBindEvalReplacementCustomOperatorCPU() { std::wstring filePath = FileHelpers::GetModulePath() + L"relu.onnx"; LoadBindEval_CustomOperator_CPU(filePath.c_str()); } //! 
Scenario21: Load two models, set them up to run chained after one another on the same gpu hardware device -static void ScenarioCppWinrtGpuTest_Scenario21RunModel2ChainZ() { +static void Scenario21RunModel2ChainZ() { // load a model, TODO: get a model that has an image descriptor std::wstring filePath = FileHelpers::GetModulePath() + L"fns-candy.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -757,9 +756,9 @@ static void ScenarioCppWinrtGpuTest_Scenario21RunModel2ChainZ() { bool VerifyHelper(ImageFeatureValue actual, ImageFeatureValue expected) { auto softwareBitmapActual = actual.VideoFrame().SoftwareBitmap(); auto softwareBitmapExpected = expected.VideoFrame().SoftwareBitmap(); - EXPECT_EQ(softwareBitmapActual.PixelHeight(), softwareBitmapExpected.PixelHeight()); - EXPECT_EQ(softwareBitmapActual.PixelWidth(), softwareBitmapExpected.PixelWidth()); - EXPECT_EQ(softwareBitmapActual.BitmapPixelFormat(), softwareBitmapExpected.BitmapPixelFormat()); + WINML_EXPECT_EQUAL(softwareBitmapActual.PixelHeight(), softwareBitmapExpected.PixelHeight()); + WINML_EXPECT_EQUAL(softwareBitmapActual.PixelWidth(), softwareBitmapExpected.PixelWidth()); + WINML_EXPECT_EQUAL(softwareBitmapActual.BitmapPixelFormat(), softwareBitmapExpected.BitmapPixelFormat()); // 4 means 4 channels uint32_t size = 4 * softwareBitmapActual.PixelHeight() * softwareBitmapActual.PixelWidth(); @@ -794,7 +793,7 @@ bool VerifyHelper(ImageFeatureValue actual, ImageFeatureValue expected) { return ((float)errors / size < cMaxErrorRate); } -static void ScenarioCppWinrtTest_Scenario22ImageBindingAsCPUTensor() { +static void Scenario22ImageBindingAsCPUTensor() { std::wstring modulePath = FileHelpers::GetModulePath(); std::wstring inputImagePath = modulePath + L"fish_720.png"; std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; @@ -854,7 +853,7 @@ static void ScenarioCppWinrtTest_Scenario22ImageBindingAsCPUTensor() { bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, 
BitmapPixelFormat::Bgra8); VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); - EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); + WINML_EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); // check the output video frame object by saving output image to disk std::wstring outputDataImageFileName = L"out_cpu_tensor_fish_720.jpg"; @@ -867,7 +866,7 @@ static void ScenarioCppWinrtTest_Scenario22ImageBindingAsCPUTensor() { encoder.FlushAsync().get(); } -static void ScenarioCppWinrtGpuTest_Scenario22ImageBindingAsGPUTensor() { +static void Scenario22ImageBindingAsGPUTensor() { std::wstring modulePath = FileHelpers::GetModulePath(); std::wstring inputImagePath = modulePath + L"fish_720.png"; std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; @@ -1029,7 +1028,7 @@ static void ScenarioCppWinrtGpuTest_Scenario22ImageBindingAsGPUTensor() { bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Rgba8); VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); - EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); + WINML_EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); //check the output video frame object StorageFolder currentfolder = StorageFolder::GetFolderFromPathAsync(modulePath).get(); @@ -1041,7 +1040,7 @@ static void ScenarioCppWinrtGpuTest_Scenario22ImageBindingAsGPUTensor() { encoder.FlushAsync().get(); } -static void ScenarioCppWinrtTest_QuantizedModels() { +static void QuantizedModels() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"onnxzoo_lotus_inception_v1-dq.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -1060,7 +1059,7 @@ static void ScenarioCppWinrtTest_QuantizedModels() { 
WINML_EXPECT_NO_THROW(session.Evaluate(binding, filePath)); } -static void ScenarioCppWinrtGpuTest_MsftQuantizedModels() { +static void MsftQuantizedModels() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"coreml_Resnet50_ImageNet-dq.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -1084,7 +1083,7 @@ static void ScenarioCppWinrtGpuTest_MsftQuantizedModels() { WINML_EXPECT_NO_THROW(session.Evaluate(binding, filePath)); } -static void ScenarioCppWinrtGpuTest_SyncVsAsync() { +static void SyncVsAsync() { // create model, device and session LearningModel model = nullptr; WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"fns-candy.onnx")); @@ -1151,26 +1150,26 @@ static void ScenarioCppWinrtGpuTest_SyncVsAsync() { std::cout << "Asynchronous time for " << N << " evaluations: " << asyncTime.count() << " milliseconds\n"; } -static void ScenarioCppWinrtGpuTest_CustomCommandQueueWithFence() { +static void CustomCommandQueueWithFence() { static const wchar_t* const modelFileName = L"fns-candy.onnx"; static const wchar_t* const inputDataImageFileName = L"fish_720.png"; com_ptr d3d12Device; - EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), d3d12Device.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(nullptr, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device), d3d12Device.put_void())); D3D12_COMMAND_QUEUE_DESC queueDesc = {}; queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; com_ptr queue; - EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateCommandQueue(&queueDesc, __uuidof(ID3D12CommandQueue), queue.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateCommandQueue(&queueDesc, __uuidof(ID3D12CommandQueue), queue.put_void())); com_ptr fence; - EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, __uuidof(ID3D12Fence), fence.put_void())); + 
WINML_EXPECT_HRESULT_SUCCEEDED(d3d12Device->CreateFence(0, D3D12_FENCE_FLAG_NONE, __uuidof(ID3D12Fence), fence.put_void())); auto devicefactory = get_activation_factory(); com_ptr<::IUnknown> learningModelDeviceUnknown; - EXPECT_HRESULT_SUCCEEDED(devicefactory->CreateFromD3D12CommandQueue(queue.get(), learningModelDeviceUnknown.put())); + WINML_EXPECT_HRESULT_SUCCEEDED(devicefactory->CreateFromD3D12CommandQueue(queue.get(), learningModelDeviceUnknown.put())); LearningModelDevice device = nullptr; WINML_EXPECT_NO_THROW(learningModelDeviceUnknown.as(device)); @@ -1211,9 +1210,9 @@ static void ScenarioCppWinrtGpuTest_CustomCommandQueueWithFence() { // until after the wait is unblocked, and the signal should not complete until model evaluation does. This can // only be true if WinML executes the workload on the supplied queue (instead of using its own). - EXPECT_HRESULT_SUCCEEDED(queue->Wait(fence.get(), 1)); + WINML_EXPECT_HRESULT_SUCCEEDED(queue->Wait(fence.get(), 1)); - EXPECT_HRESULT_SUCCEEDED(queue->Signal(fence.get(), 2)); + WINML_EXPECT_HRESULT_SUCCEEDED(queue->Signal(fence.get(), 2)); winrt::hstring correlationId; winrt::Windows::Foundation::IAsyncOperation asyncOp; @@ -1222,20 +1221,20 @@ static void ScenarioCppWinrtGpuTest_CustomCommandQueueWithFence() { Sleep(1000); // Give the model a chance to run (which it shouldn't if everything is working correctly) // Because we haven't unblocked the wait yet, model evaluation must not have completed (nor the fence signal) - EXPECT_NE(asyncOp.Status(), winrt::Windows::Foundation::AsyncStatus::Completed); - EXPECT_EQ(fence->GetCompletedValue(), 0); + WINML_EXPECT_NOT_EQUAL(asyncOp.Status(), winrt::Windows::Foundation::AsyncStatus::Completed); + WINML_EXPECT_EQUAL(fence->GetCompletedValue(), 0); // Unblock the queue - EXPECT_HRESULT_SUCCEEDED(fence->Signal(1)); + WINML_EXPECT_HRESULT_SUCCEEDED(fence->Signal(1)); // Wait for model evaluation to complete asyncOp.get(); // The fence must be signaled by now (because model 
evaluation has completed) - EXPECT_EQ(fence->GetCompletedValue(), 2); + WINML_EXPECT_EQUAL(fence->GetCompletedValue(), 2); } -static void ScenarioCppWinrtGpuTest_ReuseVideoFrame() { +static void ReuseVideoFrame() { std::wstring modulePath = FileHelpers::GetModulePath(); std::wstring inputImagePath = modulePath + L"fish_720.png"; std::wstring bmImagePath = modulePath + L"bm_fish_720.jpg"; @@ -1286,12 +1285,12 @@ static void ScenarioCppWinrtGpuTest_ReuseVideoFrame() { bm_softwareBitmap = SoftwareBitmap::Convert(bm_softwareBitmap, BitmapPixelFormat::Bgra8); VideoFrame bm_videoFrame = VideoFrame::CreateWithSoftwareBitmap(bm_softwareBitmap); ImageFeatureValue bm_imagevalue = ImageFeatureValue::CreateFromVideoFrame(bm_videoFrame); - EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); + WINML_EXPECT_TRUE(VerifyHelper(bm_imagevalue, outputTensor)); } } } } -static void ScenarioCppWinrtTest_EncryptedStream() { +static void EncryptedStream() { // get a stream std::wstring path = FileHelpers::GetModulePath() + L"model.onnx"; auto storageFile = StorageFile::GetFileFromPathAsync(path).get(); @@ -1309,11 +1308,11 @@ static void ScenarioCppWinrtTest_EncryptedStream() { // verify loading the encrypted stream fails appropriately. 
auto encryptedStream = InMemoryRandomAccessStream(); encryptedStream.WriteAsync(encryptedBuffer).get(); - EXPECT_THROW_SPECIFIC(LearningModel::LoadFromStream(RandomAccessStreamReference::CreateFromStream(encryptedStream)), - winrt::hresult_error, - [](const winrt::hresult_error& e) -> bool { - return e.code() == E_INVALIDARG; - }); + WINML_EXPECT_THROW_SPECIFIC(LearningModel::LoadFromStream(RandomAccessStreamReference::CreateFromStream(encryptedStream)), + winrt::hresult_error, + [](const winrt::hresult_error& e) -> bool { + return e.code() == E_INVALIDARG; + }); // now decrypt auto decryptedBuffer = winrt::Windows::Security::Cryptography::Core::CryptographicEngine::Decrypt(key, encryptedBuffer, iv); @@ -1327,7 +1326,7 @@ static void ScenarioCppWinrtTest_EncryptedStream() { WINML_EXPECT_NO_THROW(session = LearningModelSession(model)); } -void DeviceLostRecoveryHelper() { +static void DeviceLostRecovery() { // load a model std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); @@ -1348,7 +1347,7 @@ void DeviceLostRecoveryHelper() { // evaluate should fail try { session.Evaluate(binding, L""); - FAIL() << "Evaluate should fail after removing the device"; + WINML_LOG_ERROR("Evaluate should fail after removing the device"); } catch (...) { } @@ -1357,37 +1356,28 @@ void DeviceLostRecoveryHelper() { binding = nullptr; // create new session and binding and try again! 
- session = LearningModelSession(model, LearningModelDevice(LearningModelDeviceKind::DirectX)); - binding = LearningModelBinding(session); + WINML_EXPECT_NO_THROW(session = LearningModelSession(model, LearningModelDevice(LearningModelDeviceKind::DirectX))); + WINML_EXPECT_NO_THROW(binding = LearningModelBinding(session)); BindFeatures(binding, model.InputFeatures()); - session.Evaluate(binding, L""); - exit(0); -} - -static void ScenarioCppWinrtGpuTestDeathTest_DeviceLostRecovery() { - ::testing::FLAGS_gtest_death_test_style = "threadsafe"; - EXPECT_EXIT( - DeviceLostRecoveryHelper(), - ::testing::ExitedWithCode(0), - ""); + WINML_EXPECT_NO_THROW(session.Evaluate(binding, L"")); } -static void ScenarioCppWinrtGpuSkipEdgeCoreTest_D2DInterop() { +static void D2DInterop() { // load a model (model.onnx == squeezenet[1,3,224,224]) std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; LearningModel model = LearningModel::LoadFromFilePath(filePath); // create a dx12 device com_ptr device = nullptr; - EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(NULL, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device1), device.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(D3D12CreateDevice(NULL, D3D_FEATURE_LEVEL_11_0, __uuidof(ID3D12Device1), device.put_void())); // now create a command queue from it com_ptr commandQueue = nullptr; D3D12_COMMAND_QUEUE_DESC queueDesc = {}; queueDesc.Type = D3D12_COMMAND_LIST_TYPE_DIRECT; - EXPECT_HRESULT_SUCCEEDED(device->CreateCommandQueue(&queueDesc, winrt::guid_of(), commandQueue.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(device->CreateCommandQueue(&queueDesc, winrt::guid_of(), commandQueue.put_void())); // create a winml learning device based on that dx12 queue auto factory = get_activation_factory(); com_ptr<::IUnknown> spUnk; - EXPECT_HRESULT_SUCCEEDED(factory->CreateFromD3D12CommandQueue(commandQueue.get(), spUnk.put())); + WINML_EXPECT_HRESULT_SUCCEEDED(factory->CreateFromD3D12CommandQueue(commandQueue.get(), spUnk.put())); auto 
learningDevice = spUnk.as(); // create a winml session from that dx device LearningModelSession session(model, learningDevice); @@ -1400,19 +1390,63 @@ static void ScenarioCppWinrtGpuSkipEdgeCoreTest_D2DInterop() { // create a D2D factory D2D1_FACTORY_OPTIONS options = {}; com_ptr d2dFactory; - EXPECT_HRESULT_SUCCEEDED(D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, __uuidof(ID2D1Factory), &options, d2dFactory.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, __uuidof(ID2D1Factory), &options, d2dFactory.put_void())); // grab the dxgi surface back from our video frame com_ptr dxgiSurface; com_ptr dxgiInterfaceAccess = frame.Direct3DSurface().as(); - EXPECT_HRESULT_SUCCEEDED(dxgiInterfaceAccess->GetInterface(__uuidof(IDXGISurface), dxgiSurface.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(dxgiInterfaceAccess->GetInterface(__uuidof(IDXGISurface), dxgiSurface.put_void())); // and try and use our surface to create a render targer com_ptr renderTarget; D2D1_RENDER_TARGET_PROPERTIES props = D2D1::RenderTargetProperties(); props.pixelFormat = D2D1::PixelFormat( DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_IGNORE); - EXPECT_HRESULT_SUCCEEDED(d2dFactory->CreateDxgiSurfaceRenderTarget( + WINML_EXPECT_HRESULT_SUCCEEDED(d2dFactory->CreateDxgiSurfaceRenderTarget( dxgiSurface.get(), props, renderTarget.put())); } + +const ScenarioTestApi& getapi() { + static constexpr ScenarioTestApi api = + { + ScenarioCppWinrtTestSetup, + ScenarioCppWinrtGpuTestSetup, + ScenarioCppWinrtGpuSkipEdgeCoreTestSetup, + Sample1, + Scenario1LoadBindEvalDefault, + Scenario2LoadModelFromStream, + Scenario5AsyncEval, + Scenario7EvalWithNoBind, + Scenario8SetDeviceSampleDefault, + Scenario8SetDeviceSampleCPU, + Scenario17DevDiagnostics, + Scenario22ImageBindingAsCPUTensor, + QuantizedModels, + EncryptedStream, + Scenario3SoftwareBitmapInputBinding, + Scenario6BindWithProperties, + Scenario8SetDeviceSampleDefaultDirectX, + 
Scenario8SetDeviceSampleMinPower, + Scenario8SetDeviceSampleMaxPerf, + Scenario8SetDeviceSampleMyCameraDevice, + Scenario8SetDeviceSampleCustomCommandQueue, + Scenario9LoadBindEvalInputTensorGPU, + Scenario13SingleModelOnCPUandGPU, + Scenario11FreeDimensionsTensor, + Scenario11FreeDimensionsImage, + Scenario14RunModelSwapchain, + Scenario20aLoadBindEvalCustomOperatorCPU, + Scenario20bLoadBindEvalReplacementCustomOperatorCPU, + Scenario21RunModel2ChainZ, + Scenario22ImageBindingAsGPUTensor, + MsftQuantizedModels, + SyncVsAsync, + CustomCommandQueueWithFence, + ReuseVideoFrame, + DeviceLostRecovery, + Scenario8SetDeviceSampleD3D11Device, + D2DInterop, + }; + return api; +} \ No newline at end of file diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h index 87962552fc5f3..48dfcbd9b6c23 100644 --- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h @@ -1,47 +1,87 @@ #include "test.h" +using VoidTest = void(*)(); +using SetupTest = VoidTest; +struct ScenarioTestApi +{ + SetupTest ScenarioCppWinrtTestSetup; + SetupTest ScenarioCppWinrtGpuTestSetup; + SetupTest ScenarioCppWinrtGpuSkipEdgeCoreTestSetup; + VoidTest Sample1; + VoidTest Scenario1LoadBindEvalDefault; + VoidTest Scenario2LoadModelFromStream; + VoidTest Scenario5AsyncEval; + VoidTest Scenario7EvalWithNoBind; + VoidTest Scenario8SetDeviceSampleDefault; + VoidTest Scenario8SetDeviceSampleCPU; + VoidTest Scenario17DevDiagnostics; + VoidTest DISABLED_Scenario22ImageBindingAsCPUTensor; + VoidTest QuantizedModels; + VoidTest EncryptedStream; + VoidTest Scenario3SoftwareBitmapInputBinding; + VoidTest Scenario6BindWithProperties; + VoidTest Scenario8SetDeviceSampleDefaultDirectX; + VoidTest Scenario8SetDeviceSampleMinPower; + VoidTest Scenario8SetDeviceSampleMaxPerf; + VoidTest Scenario8SetDeviceSampleMyCameraDevice; + VoidTest Scenario8SetDeviceSampleCustomCommandQueue; + VoidTest 
DISABLED_Scenario9LoadBindEvalInputTensorGPU; + VoidTest Scenario13SingleModelOnCPUandGPU; + VoidTest Scenario11FreeDimensionsTensor; + VoidTest Scenario11FreeDimensionsImage; + VoidTest Scenario14RunModelSwapchain; + VoidTest Scenario20aLoadBindEvalCustomOperatorCPU; + VoidTest Scenario20bLoadBindEvalReplacementCustomOperatorCPU; + VoidTest DISABLED_Scenario21RunModel2ChainZ; + VoidTest DISABLED_Scenario22ImageBindingAsGPUTensor; + VoidTest MsftQuantizedModels; + VoidTest DISABLED_SyncVsAsync; + VoidTest DISABLED_CustomCommandQueueWithFence; + VoidTest DISABLED_ReuseVideoFrame; + VoidTest DeviceLostRecovery; + VoidTest Scenario8SetDeviceSampleD3D11Device; + VoidTest D2DInterop; +}; +const ScenarioTestApi& getapi(); WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtTest, ScenarioCppWinrtTestSetup) -WINML_TEST(ScenarioCppWinrtTest, Sample1, ScenarioCppWinrtTest_Sample1) -WINML_TEST(ScenarioCppWinrtTest, Scenario1LoadBindEvalDefault, ScenarioCppWinrtTest_Scenario1LoadBindEvalDefault) -WINML_TEST(ScenarioCppWinrtTest, Scenario2LoadModelFromStream, ScenarioCppWinrtTest_Scenario2LoadModelFromStream) -WINML_TEST(ScenarioCppWinrtTest, Scenario5AsyncEval, ScenarioCppWinrtTest_Scenario5AsyncEval) -WINML_TEST(ScenarioCppWinrtTest, Scenario7EvalWithNoBind, ScenarioCppWinrtTest_Scenario7EvalWithNoBind) -WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleDefault, ScenarioCppWinrtTest_Scenario8SetDeviceSampleDefault) -WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleCPU, ScenarioCppWinrtTest_Scenario8SetDeviceSampleCPU) -WINML_TEST(ScenarioCppWinrtTest, Scenario17DevDiagnostics, ScenarioCppWinrtTest_Scenario17DevDiagnostics) -WINML_TEST(ScenarioCppWinrtTest, DISABLED_Scenario22ImageBindingAsCPUTensor, ScenarioCppWinrtTest_Scenario22ImageBindingAsCPUTensor) -WINML_TEST(ScenarioCppWinrtTest, QuantizedModels, ScenarioCppWinrtTest_QuantizedModels) -WINML_TEST(ScenarioCppWinrtTest, EncryptedStream, ScenarioCppWinrtTest_EncryptedStream) +WINML_TEST(ScenarioCppWinrtTest, 
Sample1) +WINML_TEST(ScenarioCppWinrtTest, Scenario1LoadBindEvalDefault) +WINML_TEST(ScenarioCppWinrtTest, Scenario2LoadModelFromStream) +WINML_TEST(ScenarioCppWinrtTest, Scenario5AsyncEval) +WINML_TEST(ScenarioCppWinrtTest, Scenario7EvalWithNoBind) +WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleDefault) +WINML_TEST(ScenarioCppWinrtTest, Scenario8SetDeviceSampleCPU) +WINML_TEST(ScenarioCppWinrtTest, Scenario17DevDiagnostics) +WINML_TEST(ScenarioCppWinrtTest, DISABLED_Scenario22ImageBindingAsCPUTensor) +WINML_TEST(ScenarioCppWinrtTest, QuantizedModels) +WINML_TEST(ScenarioCppWinrtTest, EncryptedStream) WINML_TEST_CLASS_END() WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuTest, ScenarioCppWinrtGpuTestSetup) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario3SoftwareBitmapInputBinding, ScenarioCppWinrtGpuTest_Scenario3SoftwareBitmapInputBinding) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario6BindWithProperties, ScenarioCppWinrtGpuTest_Scenario6BindWithProperties) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleDefaultDirectX, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleDefaultDirectX) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMinPower, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMinPower) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMaxPerf, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMaxPerf) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMyCameraDevice, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleMyCameraDevice) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleCustomCommandQueue, ScenarioCppWinrtGpuTest_Scenario8SetDeviceSampleCustomCommandQueue) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario9LoadBindEvalInputTensorGPU, ScenarioCppWinrtGpuTest_Scenario9LoadBindEvalInputTensorGPU) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario13SingleModelOnCPUandGPU, ScenarioCppWinrtGpuTest_Scenario13SingleModelOnCPUandGPU) -WINML_TEST(ScenarioCppWinrtGpuTest, 
Scenario11FreeDimensionsTensor, ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsTensor) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsImage, ScenarioCppWinrtGpuTest_Scenario11FreeDimensionsImage) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario14RunModelSwapchain, ScenarioCppWinrtGpuTest_Scenario14RunModelSwapchain) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20aLoadBindEvalCustomOperatorCPU, ScenarioCppWinrtGpuTest_Scenario20aLoadBindEvalCustomOperatorCPU) -WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20bLoadBindEvalReplacementCustomOperatorCPU, ScenarioCppWinrtGpuTest_Scenario20bLoadBindEvalReplacementCustomOperatorCPU) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario21RunModel2ChainZ, ScenarioCppWinrtGpuTest_Scenario21RunModel2ChainZ) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario22ImageBindingAsGPUTensor, ScenarioCppWinrtGpuTest_Scenario22ImageBindingAsGPUTensor) -WINML_TEST(ScenarioCppWinrtGpuTest, MsftQuantizedModels, ScenarioCppWinrtGpuTest_MsftQuantizedModels) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_SyncVsAsync, ScenarioCppWinrtGpuTest_SyncVsAsync) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_CustomCommandQueueWithFence, ScenarioCppWinrtGpuTest_CustomCommandQueueWithFence) -WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_ReuseVideoFrame, ScenarioCppWinrtGpuTest_ReuseVideoFrame) -WINML_TEST_CLASS_END() - -WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuTestDeathTest, ScenarioCppWinrtGpuTestSetup) -WINML_TEST(ScenarioCppWinrtGpuTestDeathTest, DeviceLostRecovery, ScenarioCppWinrtGpuTestDeathTest_DeviceLostRecovery) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario3SoftwareBitmapInputBinding) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario6BindWithProperties) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleDefaultDirectX) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMinPower) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleMaxPerf) +WINML_TEST(ScenarioCppWinrtGpuTest, 
Scenario8SetDeviceSampleMyCameraDevice) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario8SetDeviceSampleCustomCommandQueue) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario9LoadBindEvalInputTensorGPU) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario13SingleModelOnCPUandGPU) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsTensor) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario11FreeDimensionsImage) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario14RunModelSwapchain) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20aLoadBindEvalCustomOperatorCPU) +WINML_TEST(ScenarioCppWinrtGpuTest, Scenario20bLoadBindEvalReplacementCustomOperatorCPU) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario21RunModel2ChainZ) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_Scenario22ImageBindingAsGPUTensor) +WINML_TEST(ScenarioCppWinrtGpuTest, MsftQuantizedModels) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_SyncVsAsync) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_CustomCommandQueueWithFence) +WINML_TEST(ScenarioCppWinrtGpuTest, DISABLED_ReuseVideoFrame) +WINML_TEST(ScenarioCppWinrtGpuTest, DeviceLostRecovery) WINML_TEST_CLASS_END() WINML_TEST_CLASS_BEGIN_WITH_SETUP(ScenarioCppWinrtGpuSkipEdgeCoreTest, ScenarioCppWinrtGpuSkipEdgeCoreTestSetup) -WINML_TEST(ScenarioCppWinrtGpuSkipEdgeCoreTest, Scenario8SetDeviceSampleD3D11Device, ScenarioCppWinrtGpuSkipEdgeCoreTest_Scenario8SetDeviceSampleD3D11Device) -WINML_TEST(ScenarioCppWinrtGpuSkipEdgeCoreTest, D2DInterop, ScenarioCppWinrtGpuSkipEdgeCoreTest_D2DInterop) -WINML_TEST_CLASS_END() +WINML_TEST(ScenarioCppWinrtGpuSkipEdgeCoreTest, Scenario8SetDeviceSampleD3D11Device ) +WINML_TEST(ScenarioCppWinrtGpuSkipEdgeCoreTest, D2DInterop) +WINML_TEST_CLASS_END() \ No newline at end of file From 031a0538811ad4f0701be023c50ea3ecbddc8930 Mon Sep 17 00:00:00 2001 From: Ryan Lai Date: Thu, 9 Jan 2020 16:35:55 -0800 Subject: [PATCH 4/6] PR comments --- winml/test/common/test.h | 3 +++ winml/test/scenario/cppwinrt/CustomOps.h | 2 -- 
winml/test/scenario/cppwinrt/scenariotestscppwinrt.h | 2 -- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/winml/test/common/test.h b/winml/test/common/test.h index f3376d4bd6ed1..31739f672503f 100644 --- a/winml/test/common/test.h +++ b/winml/test/common/test.h @@ -1,3 +1,6 @@ +using VoidTest = void (*)(); +using SetupTest = VoidTest; + #ifdef BUILD_GOOGLE_TEST #include "googleTestMacros.h" #else diff --git a/winml/test/scenario/cppwinrt/CustomOps.h b/winml/test/scenario/cppwinrt/CustomOps.h index d0a8a3efd108c..df48bd51af990 100644 --- a/winml/test/scenario/cppwinrt/CustomOps.h +++ b/winml/test/scenario/cppwinrt/CustomOps.h @@ -1,6 +1,4 @@ #include "test.h" -using VoidTest = void(*)(); -using SetupTest = VoidTest; struct CustomOpsTestApi { SetupTest CustomOpsScenarioTestSetup; diff --git a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h index 48dfcbd9b6c23..9e49cf1d3752d 100644 --- a/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h +++ b/winml/test/scenario/cppwinrt/scenariotestscppwinrt.h @@ -1,6 +1,4 @@ #include "test.h" -using VoidTest = void(*)(); -using SetupTest = VoidTest; struct ScenarioTestApi { SetupTest ScenarioCppWinrtTestSetup; From 1e8cc07ecec278685ac98f0d2d87ef4019e33309 Mon Sep 17 00:00:00 2001 From: Ryan Lai Date: Mon, 13 Jan 2020 14:29:27 -0800 Subject: [PATCH 5/6] Refactor winml api tests --- cmake/winml_unittests.cmake | 1 + winml/test/api/APITest.h | 52 +-- winml/test/api/LearningModelAPITest.cpp | 369 +++++++++--------- winml/test/api/LearningModelAPITest.h | 41 ++ .../test/api/LearningModelBindingAPITest.cpp | 354 +++++++++-------- winml/test/api/LearningModelBindingAPITest.h | 49 +++ .../test/api/LearningModelSessionAPITest.cpp | 285 ++++++++------ winml/test/api/LearningModelSessionAPITest.h | 44 +++ winml/test/common/googleTestMacros.h | 4 +- winml/test/common/taefTestMacros.h | 4 +- 10 files changed, 707 insertions(+), 496 deletions(-) create mode 100644 
winml/test/api/LearningModelAPITest.h create mode 100644 winml/test/api/LearningModelBindingAPITest.h create mode 100644 winml/test/api/LearningModelSessionAPITest.h diff --git a/cmake/winml_unittests.cmake b/cmake/winml_unittests.cmake index 95661e85d8bc2..4bdbb34432466 100644 --- a/cmake/winml_unittests.cmake +++ b/cmake/winml_unittests.cmake @@ -69,6 +69,7 @@ add_winml_test( SOURCES ${winml_test_api_src} LIBS winml_test_common ) +target_compile_definitions(winml_test_api PRIVATE BUILD_GOOGLE_TEST) target_precompiled_header(winml_test_api testPch.h) if (onnxruntime_USE_DML) diff --git a/winml/test/api/APITest.h b/winml/test/api/APITest.h index 68e68f724ddf5..fd40a19b787f4 100644 --- a/winml/test/api/APITest.h +++ b/winml/test/api/APITest.h @@ -5,37 +5,25 @@ //----------------------------------------------------------------------------- #pragma once +#include "fileHelpers.h" +namespace APITest { +static void LoadModel(const std::wstring& modelPath, + winrt::Windows::AI::MachineLearning::LearningModel& learningModel) { + std::wstring fullPath = FileHelpers::GetModulePath() + modelPath; + learningModel = winrt::Windows::AI::MachineLearning::LearningModel::LoadFromFilePath(fullPath); +}; -#include - -class APITest : public ::testing::Test -{ -protected: - void LoadModel(const std::wstring& modelPath) - { - std::wstring fullPath = FileHelpers::GetModulePath() + modelPath; - m_model = winrt::Windows::AI::MachineLearning::LearningModel::LoadFromFilePath(fullPath); - } - - winrt::Windows::AI::MachineLearning::LearningModel m_model = nullptr; - winrt::Windows::AI::MachineLearning::LearningModelDevice m_device = nullptr; - winrt::Windows::AI::MachineLearning::LearningModelSession m_session = nullptr; - - uint64_t GetAdapterIdQuadPart() - { - LARGE_INTEGER id; - id.LowPart = m_device.AdapterId().LowPart; - id.HighPart = m_device.AdapterId().HighPart; - return id.QuadPart; - }; - - _LUID GetAdapterIdAsLUID() - { - _LUID id; - id.LowPart = m_device.AdapterId().LowPart; - 
id.HighPart = m_device.AdapterId().HighPart; - return id; - } - - bool m_runGPUTests = true; +static uint64_t GetAdapterIdQuadPart(winrt::Windows::AI::MachineLearning::LearningModelDevice& device) { + LARGE_INTEGER id; + id.LowPart = device.AdapterId().LowPart; + id.HighPart = device.AdapterId().HighPart; + return id.QuadPart; }; + +static _LUID GetAdapterIdAsLUID(winrt::Windows::AI::MachineLearning::LearningModelDevice& device) { + _LUID id; + id.LowPart = device.AdapterId().LowPart; + id.HighPart = device.AdapterId().HighPart; + return id; +} +}; // namespace APITest diff --git a/winml/test/api/LearningModelAPITest.cpp b/winml/test/api/LearningModelAPITest.cpp index ad592a9a86301..639940551d580 100644 --- a/winml/test/api/LearningModelAPITest.cpp +++ b/winml/test/api/LearningModelAPITest.cpp @@ -1,7 +1,6 @@ #include "testPch.h" - +#include "LearningModelAPITest.h" #include "APITest.h" - #include #include #include @@ -15,107 +14,96 @@ using namespace winrt::Windows::Media; using namespace winrt::Windows::Storage; using namespace winrt::Windows::Storage::Streams; -class LearningModelAPITest : public APITest -{ -protected: - LearningModelAPITest() { - init_apartment(); - m_model = nullptr; - m_device = nullptr; - m_session = nullptr; - } -}; - -class LearningModelAPITestGpu : public LearningModelAPITest -{ -protected: - void SetUp() override - { - GPUTEST - } -}; +static void LearningModelAPITestSetup() { + init_apartment(); +} -TEST_F(LearningModelAPITest, CreateModelFromFilePath) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); +static void LearningModelAPITestGpuSetup() { + GPUTEST; + init_apartment(); } -TEST_F(LearningModelAPITest, CreateModelFromIStorage) -{ - std::wstring path = FileHelpers::GetModulePath() + L"squeezenet_modifiedforruntimestests.onnx"; - auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); - EXPECT_NO_THROW(m_model = 
LearningModel::LoadFromStorageFileAsync(storageFile).get()); - EXPECT_TRUE(m_model != nullptr); - - // check the author so we know the model was populated correctly. - std::wstring author(m_model.Author()); - EXPECT_EQ(L"onnx-caffe2", author); +static void CreateModelFromFilePath() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); } -TEST_F(LearningModelAPITest, CreateModelFromIStorageOutsideCwd) -{ - std::wstring path = FileHelpers::GetModulePath() + L"ModelSubdirectory\\ModelInSubdirectory.onnx"; - auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); - EXPECT_NO_THROW(m_model = LearningModel::LoadFromStorageFileAsync(storageFile).get()); - EXPECT_TRUE(m_model != nullptr); - - // check the author so we know the model was populated correctly. - std::wstring author(m_model.Author()); - EXPECT_EQ(L"onnx-caffe2", author); +static void CreateModelFromIStorage() { + std::wstring path = FileHelpers::GetModulePath() + L"squeezenet_modifiedforruntimestests.onnx"; + auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromStorageFileAsync(storageFile).get()); + WINML_EXPECT_TRUE(learningModel != nullptr); + + // check the author so we know the model was populated correctly. 
+ std::wstring author(learningModel.Author()); + WINML_EXPECT_EQUAL(L"onnx-caffe2", author); } -TEST_F(LearningModelAPITest, CreateModelFromIStream) -{ - std::wstring path = FileHelpers::GetModulePath() + L"squeezenet_modifiedforruntimestests.onnx"; - auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); - winrt::Windows::Storage::Streams::IRandomAccessStreamReference streamref; - storageFile.as(streamref); +static void CreateModelFromIStorageOutsideCwd() { + std::wstring path = FileHelpers::GetModulePath() + L"ModelSubdirectory\\ModelInSubdirectory.onnx"; + auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromStorageFileAsync(storageFile).get()); + WINML_EXPECT_TRUE(learningModel != nullptr); - EXPECT_NO_THROW(m_model = LearningModel::LoadFromStreamAsync(streamref).get()); - EXPECT_TRUE(m_model != nullptr); + // check the author so we know the model was populated correctly. + std::wstring author(learningModel.Author()); + WINML_EXPECT_EQUAL(L"onnx-caffe2", author); +} - // check the author so we know the model was populated correctly. - std::wstring author(m_model.Author()); - EXPECT_EQ(L"onnx-caffe2", author); +static void CreateModelFromIStream() { + std::wstring path = FileHelpers::GetModulePath() + L"squeezenet_modifiedforruntimestests.onnx"; + auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); + winrt::Windows::Storage::Streams::IRandomAccessStreamReference streamref; + storageFile.as(streamref); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromStreamAsync(streamref).get()); + WINML_EXPECT_TRUE(learningModel != nullptr); + + // check the author so we know the model was populated correctly. 
+ std::wstring author(learningModel.Author()); + WINML_EXPECT_EQUAL(L"onnx-caffe2", author); } -TEST_F(LearningModelAPITest, GetAuthor) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); - std::wstring author(m_model.Author()); - EXPECT_EQ(L"onnx-caffe2", author); +static void ModelGetAuthor() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); + std::wstring author(learningModel.Author()); + WINML_EXPECT_EQUAL(L"onnx-caffe2", author); } -TEST_F(LearningModelAPITest, GetName) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); - std::wstring name(m_model.Name()); - EXPECT_EQ(L"squeezenet_old", name); +static void ModelGetName() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); + std::wstring name(learningModel.Name()); + WINML_EXPECT_EQUAL(L"squeezenet_old", name); } -TEST_F(LearningModelAPITest, GetDomain) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); - std::wstring domain(m_model.Domain()); - EXPECT_EQ(L"test-domain", domain); +static void ModelGetDomain() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); + std::wstring domain(learningModel.Domain()); + WINML_EXPECT_EQUAL(L"test-domain", domain); } -TEST_F(LearningModelAPITest, GetDescription) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); - std::wstring description(m_model.Description()); - EXPECT_EQ(L"test-doc_string", description); +static void ModelGetDescription() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); + std::wstring description(learningModel.Description()); + WINML_EXPECT_EQUAL(L"test-doc_string", 
description); } -TEST_F(LearningModelAPITest, GetVersion) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); - int64_t version(m_model.Version()); - (void)(version); +static void ModelGetVersion() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); + int64_t version(learningModel.Version()); + (void)(version); } typedef std::vector> Metadata; +/* class MetadataTest : public LearningModelAPITest, public testing::WithParamInterface> {}; @@ -125,16 +113,16 @@ TEST_P(MetadataTest, GetMetaData) std::vector> keyValuePairs; tie(fileName, keyValuePairs) = GetParam(); - EXPECT_NO_THROW(LoadModel(fileName.c_str())); - EXPECT_TRUE(m_model.Metadata() != nullptr); - EXPECT_EQ(keyValuePairs.size(), m_model.Metadata().Size()); + WINML_EXPECT_NO_THROW(LoadModel(fileName.c_str())); + WINML_EXPECT_TRUE(m_model.Metadata() != nullptr); + WINML_EXPECT_EQUAL(keyValuePairs.size(), m_model.Metadata().Size()); auto iter = m_model.Metadata().First(); for (auto& keyValue : keyValuePairs) { - EXPECT_TRUE(iter.HasCurrent()); - EXPECT_EQ(keyValue.first, std::wstring(iter.Current().Key())); - EXPECT_EQ(keyValue.second, std::wstring(iter.Current().Value())); + WINML_EXPECT_TRUE(iter.HasCurrent()); + WINML_EXPECT_EQUAL(keyValue.first, std::wstring(iter.Current().Key())); + WINML_EXPECT_EQUAL(keyValue.second, std::wstring(iter.Current().Value())); iter.MoveNext(); } } @@ -147,122 +135,141 @@ INSTANTIATE_TEST_SUITE_P( std::pair(L"modelWithMetaData.onnx", Metadata{{L"thisisalongkey", L"thisisalongvalue"}}), std::pair(L"modelWith2MetaData.onnx", Metadata{{L"thisisalongkey", L"thisisalongvalue"}, {L"key2", L"val2"}}) )); +*/ -TEST_F(LearningModelAPITest, EnumerateInputs) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); +static void EnumerateInputs() { + LearningModel learningModel = nullptr; + 
WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); - // purposely don't cache "InputFeatures" in order to exercise calling it multiple times - EXPECT_TRUE(m_model.InputFeatures().First().HasCurrent()); + // purposely don't cache "InputFeatures" in order to exercise calling it multiple times + WINML_EXPECT_TRUE(learningModel.InputFeatures().First().HasCurrent()); - std::wstring name(m_model.InputFeatures().First().Current().Name()); - EXPECT_EQ(L"data_0", name); + std::wstring name(learningModel.InputFeatures().First().Current().Name()); + WINML_EXPECT_EQUAL(L"data_0", name); - // make sure it's either tensor or image - TensorFeatureDescriptor tensorDescriptor = nullptr; - m_model.InputFeatures().First().Current().try_as(tensorDescriptor); - if (tensorDescriptor == nullptr) - { - ImageFeatureDescriptor imageDescriptor = nullptr; - EXPECT_NO_THROW(m_model.InputFeatures().First().Current().as(imageDescriptor)); - } + // make sure it's either tensor or image + TensorFeatureDescriptor tensorDescriptor = nullptr; + learningModel.InputFeatures().First().Current().try_as(tensorDescriptor); + if (tensorDescriptor == nullptr) { + ImageFeatureDescriptor imageDescriptor = nullptr; + WINML_EXPECT_NO_THROW(learningModel.InputFeatures().First().Current().as(imageDescriptor)); + } - auto modelDataKind = tensorDescriptor.TensorKind(); - EXPECT_EQ(TensorKind::Float, modelDataKind); + auto modelDataKind = tensorDescriptor.TensorKind(); + WINML_EXPECT_EQUAL(TensorKind::Float, modelDataKind); - EXPECT_TRUE(tensorDescriptor.IsRequired()); + WINML_EXPECT_TRUE(tensorDescriptor.IsRequired()); - std::vector expectedShapes = { 1,3,224,224 }; - EXPECT_EQ(expectedShapes.size(), tensorDescriptor.Shape().Size()); - for (uint32_t j = 0; j < tensorDescriptor.Shape().Size(); j++) - { - EXPECT_EQ(expectedShapes.at(j), tensorDescriptor.Shape().GetAt(j)); - } + std::vector expectedShapes = {1, 3, 224, 224}; + WINML_EXPECT_EQUAL(expectedShapes.size(), 
tensorDescriptor.Shape().Size()); + for (uint32_t j = 0; j < tensorDescriptor.Shape().Size(); j++) { + WINML_EXPECT_EQUAL(expectedShapes.at(j), tensorDescriptor.Shape().GetAt(j)); + } - auto first = m_model.InputFeatures().First(); - first.MoveNext(); - EXPECT_FALSE(first.HasCurrent()); + auto first = learningModel.InputFeatures().First(); + first.MoveNext(); + WINML_EXPECT_FALSE(first.HasCurrent()); } -TEST_F(LearningModelAPITest, EnumerateOutputs) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); +static void EnumerateOutputs() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); - // purposely don't cache "OutputFeatures" in order to exercise calling it multiple times - std::wstring name(m_model.OutputFeatures().First().Current().Name()); - EXPECT_EQ(L"softmaxout_1", name); + // purposely don't cache "OutputFeatures" in order to exercise calling it multiple times + std::wstring name(learningModel.OutputFeatures().First().Current().Name()); + WINML_EXPECT_EQUAL(L"softmaxout_1", name); - TensorFeatureDescriptor tensorDescriptor = nullptr; - EXPECT_NO_THROW(m_model.OutputFeatures().First().Current().as(tensorDescriptor)); - EXPECT_TRUE(tensorDescriptor != nullptr); + TensorFeatureDescriptor tensorDescriptor = nullptr; + WINML_EXPECT_NO_THROW(learningModel.OutputFeatures().First().Current().as(tensorDescriptor)); + WINML_EXPECT_TRUE(tensorDescriptor != nullptr); - auto tensorName = tensorDescriptor.Name(); - EXPECT_EQ(L"softmaxout_1", tensorName); + auto tensorName = tensorDescriptor.Name(); + WINML_EXPECT_EQUAL(L"softmaxout_1", tensorName); - auto modelDataKind = tensorDescriptor.TensorKind(); - EXPECT_EQ(TensorKind::Float, modelDataKind); + auto modelDataKind = tensorDescriptor.TensorKind(); + WINML_EXPECT_EQUAL(TensorKind::Float, modelDataKind); - EXPECT_TRUE(tensorDescriptor.IsRequired()); + WINML_EXPECT_TRUE(tensorDescriptor.IsRequired()); 
- std::vector expectedShapes = { 1, 1000, 1, 1 }; - EXPECT_EQ(expectedShapes.size(), tensorDescriptor.Shape().Size()); - for (uint32_t j = 0; j < tensorDescriptor.Shape().Size(); j++) - { - EXPECT_EQ(expectedShapes.at(j), tensorDescriptor.Shape().GetAt(j)); - } + std::vector expectedShapes = {1, 1000, 1, 1}; + WINML_EXPECT_EQUAL(expectedShapes.size(), tensorDescriptor.Shape().Size()); + for (uint32_t j = 0; j < tensorDescriptor.Shape().Size(); j++) { + WINML_EXPECT_EQUAL(expectedShapes.at(j), tensorDescriptor.Shape().GetAt(j)); + } - auto first = m_model.OutputFeatures().First(); - first.MoveNext(); - EXPECT_FALSE(first.HasCurrent()); + auto first = learningModel.OutputFeatures().First(); + first.MoveNext(); + WINML_EXPECT_FALSE(first.HasCurrent()); } -TEST_F(LearningModelAPITest, CloseModelCheckMetadata) -{ - EXPECT_NO_THROW(LoadModel(L"squeezenet_modifiedforruntimestests.onnx")); - EXPECT_NO_THROW(m_model.Close()); - std::wstring author(m_model.Author()); - EXPECT_EQ(L"onnx-caffe2", author); - std::wstring name(m_model.Name()); - EXPECT_EQ(L"squeezenet_old", name); - std::wstring domain(m_model.Domain()); - EXPECT_EQ(L"test-domain", domain); - std::wstring description(m_model.Description()); - EXPECT_EQ(L"test-doc_string", description); - int64_t version(m_model.Version()); - EXPECT_EQ(123456, version); +static void CloseModelCheckMetadata() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"squeezenet_modifiedforruntimestests.onnx", learningModel)); + WINML_EXPECT_NO_THROW(learningModel.Close()); + std::wstring author(learningModel.Author()); + WINML_EXPECT_EQUAL(L"onnx-caffe2", author); + std::wstring name(learningModel.Name()); + WINML_EXPECT_EQUAL(L"squeezenet_old", name); + std::wstring domain(learningModel.Domain()); + WINML_EXPECT_EQUAL(L"test-domain", domain); + std::wstring description(learningModel.Description()); + WINML_EXPECT_EQUAL(L"test-doc_string", description); + int64_t version(learningModel.Version()); + 
WINML_EXPECT_EQUAL(123456, version); } -TEST_F(LearningModelAPITestGpu, CloseModelCheckEval) -{ - EXPECT_NO_THROW(LoadModel(L"model.onnx")); - LearningModelSession session = nullptr; - EXPECT_NO_THROW(session = LearningModelSession(m_model)); - EXPECT_NO_THROW(m_model.Close()); - - std::wstring fullImagePath = FileHelpers::GetModulePath() + L"kitten_224.png"; - StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); - IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); - SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); - VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); - - LearningModelBinding binding = nullptr; - EXPECT_NO_THROW(binding = LearningModelBinding(session)); - EXPECT_NO_THROW(binding.Bind(m_model.InputFeatures().First().Current().Name(), frame)); - - EXPECT_NO_THROW(session.Evaluate(binding, L"")); +static void CloseModelCheckEval() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); + LearningModelSession session = nullptr; + WINML_EXPECT_NO_THROW(session = LearningModelSession(learningModel)); + WINML_EXPECT_NO_THROW(learningModel.Close()); + + std::wstring fullImagePath = FileHelpers::GetModulePath() + L"kitten_224.png"; + StorageFile imagefile = StorageFile::GetFileFromPathAsync(fullImagePath).get(); + IRandomAccessStream stream = imagefile.OpenAsync(FileAccessMode::Read).get(); + SoftwareBitmap softwareBitmap = (BitmapDecoder::CreateAsync(stream).get()).GetSoftwareBitmapAsync().get(); + VideoFrame frame = VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); + + LearningModelBinding binding = nullptr; + WINML_EXPECT_NO_THROW(binding = LearningModelBinding(session)); + WINML_EXPECT_NO_THROW(binding.Bind(learningModel.InputFeatures().First().Current().Name(), frame)); + + WINML_EXPECT_NO_THROW(session.Evaluate(binding, L"")); } 
-TEST_F(LearningModelAPITest, CloseModelNoNewSessions) -{ - EXPECT_NO_THROW(LoadModel(L"model.onnx")); - EXPECT_NO_THROW(m_model.Close()); - LearningModelSession session = nullptr; - EXPECT_THROW( - try { - session = LearningModelSession(m_model); - } catch (const winrt::hresult_error& e) { - EXPECT_EQ(E_INVALIDARG, e.code()); - throw; - } - , winrt::hresult_error); +static void CloseModelNoNewSessions() { + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); + WINML_EXPECT_NO_THROW(learningModel.Close()); + LearningModelSession session = nullptr; + WINML_EXPECT_THROW_SPECIFIC( + session = LearningModelSession(learningModel);, + winrt::hresult_error, + [](const winrt::hresult_error& e) -> bool { + return e.code() == E_INVALIDARG; + }); } + +const LearningModelApiTestApi& getapi() { + static constexpr LearningModelApiTestApi api = + { + LearningModelAPITestSetup, + LearningModelAPITestGpuSetup, + CreateModelFromFilePath, + CreateModelFromIStorage, + CreateModelFromIStorageOutsideCwd, + CreateModelFromIStream, + ModelGetAuthor, + ModelGetName, + ModelGetDomain, + ModelGetDescription, + ModelGetVersion, + EnumerateInputs, + EnumerateOutputs, + CloseModelCheckMetadata, + CloseModelCheckEval, + CloseModelNoNewSessions + }; + return api; +} \ No newline at end of file diff --git a/winml/test/api/LearningModelAPITest.h b/winml/test/api/LearningModelAPITest.h new file mode 100644 index 0000000000000..4a723ab2d9d29 --- /dev/null +++ b/winml/test/api/LearningModelAPITest.h @@ -0,0 +1,41 @@ +#include "test.h" +struct LearningModelApiTestApi +{ + SetupTest LearningModelAPITestSetup; + SetupTest LearningModelAPITestGpuSetup; + VoidTest CreateModelFromFilePath; + VoidTest CreateModelFromIStorage; + VoidTest CreateModelFromIStorageOutsideCwd; + VoidTest CreateModelFromIStream; + VoidTest ModelGetAuthor; + VoidTest ModelGetName; + VoidTest ModelGetDomain; + VoidTest ModelGetDescription; + VoidTest ModelGetVersion; + 
VoidTest EnumerateInputs; + VoidTest EnumerateOutputs; + VoidTest CloseModelCheckMetadata; + VoidTest CloseModelCheckEval; + VoidTest CloseModelNoNewSessions; +}; +const LearningModelApiTestApi& getapi(); + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelAPITest, LearningModelAPITestSetup) +WINML_TEST(LearningModelAPITest, CreateModelFromFilePath) +WINML_TEST(LearningModelAPITest, CreateModelFromIStorage) +WINML_TEST(LearningModelAPITest, CreateModelFromIStorageOutsideCwd) +WINML_TEST(LearningModelAPITest, CreateModelFromIStream) +WINML_TEST(LearningModelAPITest, ModelGetAuthor) +WINML_TEST(LearningModelAPITest, ModelGetName) +WINML_TEST(LearningModelAPITest, ModelGetDomain) +WINML_TEST(LearningModelAPITest, ModelGetDescription) +WINML_TEST(LearningModelAPITest, ModelGetVersion) +WINML_TEST(LearningModelAPITest, EnumerateInputs) +WINML_TEST(LearningModelAPITest, EnumerateOutputs) +WINML_TEST(LearningModelAPITest, CloseModelCheckMetadata) +WINML_TEST(LearningModelAPITest, CloseModelNoNewSessions) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelAPITestGpu, LearningModelAPITestGpuSetup) +WINML_TEST(LearningModelAPITestGpu, CloseModelCheckEval) +WINML_TEST_CLASS_END() \ No newline at end of file diff --git a/winml/test/api/LearningModelBindingAPITest.cpp b/winml/test/api/LearningModelBindingAPITest.cpp index 969c28ba63937..99d9dc6e9b251 100644 --- a/winml/test/api/LearningModelBindingAPITest.cpp +++ b/winml/test/api/LearningModelBindingAPITest.cpp @@ -1,13 +1,14 @@ #include "testPch.h" #include "APITest.h" +#include "LearningModelBindingAPITest.h" #include "SqueezeNetValidator.h" #include #include #include "winrt/Windows.Storage.h" #include "DeviceHelpers.h" - +#include using namespace winrt; using namespace winrt::Windows::AI::MachineLearning; using namespace winrt::Windows::Foundation::Collections; @@ -15,25 +16,22 @@ using namespace winrt::Windows::Graphics::Imaging; using namespace winrt::Windows::Media; using namespace 
winrt::Windows::Storage; -class LearningModelBindingAPITest : public APITest -{}; +static void LearningModelBindingAPITestSetup() { + init_apartment(); +} -class LearningModelBindingAPITestGpu : public LearningModelBindingAPITest -{ -protected: - void SetUp() override - { - GPUTEST - } -}; +static void LearningModelBindingAPITestGpuSetup() { + GPUTEST; + init_apartment(); +} -TEST_F(LearningModelBindingAPITest, CpuSqueezeNet) +static void CpuSqueezeNet() { std::string cpuInstance("CPU"); WinML::Engine::Test::ModelValidator::SqueezeNet(cpuInstance, LearningModelDeviceKind::Cpu, /*dataTolerance*/ 0.00001f, false); } -TEST_F(LearningModelBindingAPITest, CpuSqueezeNetEmptyOutputs) +static void CpuSqueezeNetEmptyOutputs() { std::string cpuInstance("CPU"); WinML::Engine::Test::ModelValidator::SqueezeNet( @@ -44,7 +42,7 @@ TEST_F(LearningModelBindingAPITest, CpuSqueezeNetEmptyOutputs) OutputBindingStrategy::Empty); } -TEST_F(LearningModelBindingAPITest, CpuSqueezeNetUnboundOutputs) +static void CpuSqueezeNetUnboundOutputs() { std::string cpuInstance("CPU"); WinML::Engine::Test::ModelValidator::SqueezeNet( @@ -55,7 +53,7 @@ TEST_F(LearningModelBindingAPITest, CpuSqueezeNetUnboundOutputs) OutputBindingStrategy::Unbound); } -TEST_F(LearningModelBindingAPITest, CpuSqueezeNetBindInputTensorAsInspectable) +static void CpuSqueezeNetBindInputTensorAsInspectable() { std::string cpuInstance("CPU"); WinML::Engine::Test::ModelValidator::SqueezeNet( @@ -67,27 +65,28 @@ TEST_F(LearningModelBindingAPITest, CpuSqueezeNetBindInputTensorAsInspectable) true /* bind inputs as inspectables */); } -TEST_F(LearningModelBindingAPITest, CastMapInt64) +static void CastMapInt64() { - EXPECT_NO_THROW(LoadModel(L"castmap-int64.onnx")); + WINML_EXPECT_NO_THROW(LearningModel::LoadFromFilePath(FileHelpers::GetModulePath() + L"castmap-int64.onnx")); // TODO: Check Descriptor } -TEST_F(LearningModelBindingAPITest, DictionaryVectorizerMapInt64) +static void DictionaryVectorizerMapInt64() { - 
EXPECT_NO_THROW(LoadModel(L"dictvectorizer-int64.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"dictvectorizer-int64.onnx", learningModel)); - auto inputDescriptor = m_model.InputFeatures().First().Current(); - EXPECT_TRUE(inputDescriptor.Kind() == LearningModelFeatureKind::Map); + auto inputDescriptor = learningModel.InputFeatures().First().Current(); + WINML_EXPECT_TRUE(inputDescriptor.Kind() == LearningModelFeatureKind::Map); auto mapDescriptor = inputDescriptor.as(); - EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::Int64); - EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); + WINML_EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::Int64); + WINML_EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); auto tensorDescriptor = mapDescriptor.ValueDescriptor().as(); // empty size means tensor of scalar value - EXPECT_TRUE(tensorDescriptor.Shape().Size() == 0); - EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); + WINML_EXPECT_TRUE(tensorDescriptor.Shape().Size() == 0); + WINML_EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); - LearningModelSession modelSession(m_model); + LearningModelSession modelSession(learningModel); LearningModelBinding binding(modelSession); std::unordered_map map; map[1] = 1.f; @@ -102,38 +101,39 @@ TEST_F(LearningModelBindingAPITest, DictionaryVectorizerMapInt64) binding.Bind(mapInputName, abiMap); auto mapInputInspectable = abiMap.as(); auto first = binding.First(); - EXPECT_TRUE(first.Current().Key() == mapInputName); - EXPECT_TRUE(first.Current().Value() == mapInputInspectable); - EXPECT_TRUE(binding.Lookup(mapInputName) == mapInputInspectable); + WINML_EXPECT_TRUE(first.Current().Key() == mapInputName); + WINML_EXPECT_TRUE(first.Current().Value() == mapInputInspectable); + WINML_EXPECT_TRUE(binding.Lookup(mapInputName) == mapInputInspectable); // Bind as IMapView auto mapView = 
abiMap.GetView(); binding.Bind(mapInputName, mapView); mapInputInspectable = mapView.as(); first = binding.First(); - EXPECT_TRUE(first.Current().Key() == mapInputName); - EXPECT_TRUE(first.Current().Value() == mapView); - EXPECT_TRUE(binding.Lookup(mapInputName) == mapView); + WINML_EXPECT_TRUE(first.Current().Key() == mapInputName); + WINML_EXPECT_TRUE(first.Current().Value() == mapView); + WINML_EXPECT_TRUE(binding.Lookup(mapInputName) == mapView); } -TEST_F(LearningModelBindingAPITest, DictionaryVectorizerMapString) +static void DictionaryVectorizerMapString() { - EXPECT_NO_THROW(LoadModel(L"dictvectorizer-string.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"dictvectorizer-string.onnx", learningModel)); - auto inputDescriptor = m_model.InputFeatures().First().Current(); - EXPECT_TRUE(inputDescriptor.Kind() == LearningModelFeatureKind::Map); + auto inputDescriptor = learningModel.InputFeatures().First().Current(); + WINML_EXPECT_TRUE(inputDescriptor.Kind() == LearningModelFeatureKind::Map); auto mapDescriptor = inputDescriptor.as(); - EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::String); - EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); + WINML_EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::String); + WINML_EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); auto tensorDescriptor = mapDescriptor.ValueDescriptor().as(); // empty size means tensor of scalar value - EXPECT_TRUE(tensorDescriptor.Shape().Size() == 0); - EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); + WINML_EXPECT_TRUE(tensorDescriptor.Shape().Size() == 0); + WINML_EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); - LearningModelSession modelSession(m_model); + LearningModelSession modelSession(learningModel); LearningModelBinding binding(modelSession); std::unordered_map map; map[L"1"] = 1.f; @@ -146,9 +146,9 @@ 
TEST_F(LearningModelBindingAPITest, DictionaryVectorizerMapString) auto mapInputInspectable = abiMap.as(); auto first = binding.First(); - EXPECT_TRUE(first.Current().Key() == mapInputName); - EXPECT_TRUE(first.Current().Value() == mapInputInspectable); - EXPECT_TRUE(binding.Lookup(mapInputName) == mapInputInspectable); + WINML_EXPECT_TRUE(first.Current().Key() == mapInputName); + WINML_EXPECT_TRUE(first.Current().Value() == mapInputInspectable); + WINML_EXPECT_TRUE(binding.Lookup(mapInputName) == mapInputInspectable); } static void RunZipMapInt64( @@ -157,15 +157,15 @@ static void RunZipMapInt64( { auto outputFeatures = model.OutputFeatures(); auto outputDescriptor = outputFeatures.First().Current(); - EXPECT_TRUE(outputDescriptor.Kind() == LearningModelFeatureKind::Sequence); + WINML_EXPECT_TRUE(outputDescriptor.Kind() == LearningModelFeatureKind::Sequence); auto seqDescriptor = outputDescriptor.as(); auto mapDescriptor = seqDescriptor.ElementDescriptor().as(); - EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::Int64); + WINML_EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::Int64); - EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); + WINML_EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); auto tensorDescriptor = mapDescriptor.ValueDescriptor().as(); - EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); + WINML_EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); LearningModelSession session(model); LearningModelBinding binding(session); @@ -199,63 +199,66 @@ static void RunZipMapInt64( // from output binding const auto &out1 = abiOutput.GetAt(0); const auto &out2 = result.Lookup(L"Y").as>().GetAt(0); - SCOPED_TRACE((std::ostringstream() << "size: " << out1.Size()).str()); + WINML_LOG_COMMENT((std::ostringstream() << "size: " << out1.Size()).str()); // check outputs auto iter1 = out1.First(); auto iter2 = out2.First(); for (uint32_t i = 0, size = 
(uint32_t)inputs.size(); i < size; ++i) { - EXPECT_TRUE(iter1.HasCurrent()); - EXPECT_TRUE(iter2.HasCurrent()); + WINML_EXPECT_TRUE(iter1.HasCurrent()); + WINML_EXPECT_TRUE(iter2.HasCurrent()); const auto &pair1 = iter1.Current(); const auto &pair2 = iter2.Current(); - SCOPED_TRACE((std::ostringstream() << "key: " << pair1.Key() << ", value: " << pair2.Value()).str()); - EXPECT_TRUE(pair1.Key() == i && pair2.Key() == i); - EXPECT_TRUE(pair1.Value() == inputs[i] && pair2.Value() == inputs[i]); + WINML_LOG_COMMENT((std::ostringstream() << "key: " << pair1.Key() << ", value: " << pair2.Value()).str()); + WINML_EXPECT_TRUE(pair1.Key() == i && pair2.Key() == i); + WINML_EXPECT_TRUE(pair1.Value() == inputs[i] && pair2.Value() == inputs[i]); iter1.MoveNext(); iter2.MoveNext(); } - EXPECT_TRUE(!iter1.HasCurrent()); - EXPECT_TRUE(!iter2.HasCurrent()); + WINML_EXPECT_TRUE(!iter1.HasCurrent()); + WINML_EXPECT_TRUE(!iter2.HasCurrent()); } else { abiOutput = result.Lookup(L"Y").as(); - EXPECT_TRUE(abiOutput.Size() == 1); + WINML_EXPECT_TRUE(abiOutput.Size() == 1); ABIMap map = abiOutput.GetAt(0); - EXPECT_TRUE(map.Size() == 3); - EXPECT_TRUE(map.Lookup(0) == 0.5); - EXPECT_TRUE(map.Lookup(1) == .25); - EXPECT_TRUE(map.Lookup(2) == .125); + WINML_EXPECT_TRUE(map.Size() == 3); + WINML_EXPECT_TRUE(map.Lookup(0) == 0.5); + WINML_EXPECT_TRUE(map.Lookup(1) == .25); + WINML_EXPECT_TRUE(map.Lookup(2) == .125); } } -TEST_F(LearningModelBindingAPITest, ZipMapInt64) +static void ZipMapInt64() { - EXPECT_NO_THROW(LoadModel(L"zipmap-int64.onnx")); - RunZipMapInt64(m_model, OutputBindingStrategy::Bound); + LearningModel learningModel= nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"zipmap-int64.onnx", learningModel)); + RunZipMapInt64(learningModel, OutputBindingStrategy::Bound); } -TEST_F(LearningModelBindingAPITest, ZipMapInt64Unbound) +static void ZipMapInt64Unbound() { - EXPECT_NO_THROW(LoadModel(L"zipmap-int64.onnx")); - RunZipMapInt64(m_model, OutputBindingStrategy::Unbound); + 
LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"zipmap-int64.onnx", learningModel)); + RunZipMapInt64(learningModel, OutputBindingStrategy::Unbound); } -TEST_F(LearningModelBindingAPITest, ZipMapString) +static void ZipMapString() { // output constraint: "seq(map(string, float))" or "seq(map(int64, float))" - EXPECT_NO_THROW(LoadModel(L"zipmap-string.onnx")); - auto outputs = m_model.OutputFeatures(); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"zipmap-string.onnx", learningModel)); + auto outputs = learningModel.OutputFeatures(); auto outputDescriptor = outputs.First().Current(); - EXPECT_TRUE(outputDescriptor.Kind() == LearningModelFeatureKind::Sequence); + WINML_EXPECT_TRUE(outputDescriptor.Kind() == LearningModelFeatureKind::Sequence); auto mapDescriptor = outputDescriptor.as().ElementDescriptor().as(); - EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::String); - EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); + WINML_EXPECT_TRUE(mapDescriptor.KeyKind() == TensorKind::String); + WINML_EXPECT_TRUE(mapDescriptor.ValueDescriptor().Kind() == LearningModelFeatureKind::Tensor); auto tensorDescriptor = mapDescriptor.ValueDescriptor().as(); - EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); + WINML_EXPECT_TRUE(tensorDescriptor.TensorKind() == TensorKind::Float); - LearningModelSession session(m_model); + LearningModelSession session(learningModel); LearningModelBinding binding(session); std::vector inputs = { 0.5f, 0.25f, 0.125f }; @@ -274,27 +277,27 @@ TEST_F(LearningModelBindingAPITest, ZipMapString) // from output binding const auto &out1 = ABIOutput.GetAt(0); const auto &out2 = result.Lookup(L"Y").as>().GetAt(0); - SCOPED_TRACE((std::ostringstream() << "size: " << out1.Size()).str()); + WINML_LOG_COMMENT((std::ostringstream() << "size: " << out1.Size()).str()); // single key,value pair for each map auto iter1 = out1.First(); auto 
iter2 = out2.First(); for (uint32_t i = 0, size = (uint32_t)inputs.size(); i < size; ++i) { - EXPECT_TRUE(iter2.HasCurrent()); + WINML_EXPECT_TRUE(iter2.HasCurrent()); const auto &pair1 = iter1.Current(); const auto &pair2 = iter2.Current(); - SCOPED_TRACE((std::ostringstream() << "key: " << pair1.Key().c_str() << ", value " << pair2.Value()).str()); - EXPECT_TRUE(std::wstring(pair1.Key().c_str()).compare(labels[i]) == 0); - EXPECT_TRUE(std::wstring(pair2.Key().c_str()).compare(labels[i]) == 0); - EXPECT_TRUE(pair1.Value() == inputs[i] && pair2.Value() == inputs[i]); + WINML_LOG_COMMENT((std::ostringstream() << "key: " << pair1.Key().c_str() << ", value " << pair2.Value()).str()); + WINML_EXPECT_TRUE(std::wstring(pair1.Key().c_str()).compare(labels[i]) == 0); + WINML_EXPECT_TRUE(std::wstring(pair2.Key().c_str()).compare(labels[i]) == 0); + WINML_EXPECT_TRUE(pair1.Value() == inputs[i] && pair2.Value() == inputs[i]); iter1.MoveNext(); iter2.MoveNext(); } - EXPECT_TRUE(!iter1.HasCurrent()); - EXPECT_TRUE(!iter2.HasCurrent()); + WINML_EXPECT_TRUE(!iter1.HasCurrent()); + WINML_EXPECT_TRUE(!iter2.HasCurrent()); } -TEST_F(LearningModelBindingAPITestGpu, GpuSqueezeNet) +static void GpuSqueezeNet() { std::string gpuInstance("GPU"); WinML::Engine::Test::ModelValidator::SqueezeNet( @@ -303,7 +306,7 @@ TEST_F(LearningModelBindingAPITestGpu, GpuSqueezeNet) /*dataTolerance*/ 0.00001f); } -TEST_F(LearningModelBindingAPITestGpu, GpuSqueezeNetEmptyOutputs) +static void GpuSqueezeNetEmptyOutputs() { std::string gpuInstance("GPU"); WinML::Engine::Test::ModelValidator::SqueezeNet( @@ -314,7 +317,7 @@ TEST_F(LearningModelBindingAPITestGpu, GpuSqueezeNetEmptyOutputs) OutputBindingStrategy::Empty); } -TEST_F(LearningModelBindingAPITestGpu, GpuSqueezeNetUnboundOutputs) +static void GpuSqueezeNetUnboundOutputs() { std::string gpuInstance("GPU"); WinML::Engine::Test::ModelValidator::SqueezeNet( @@ -326,44 +329,48 @@ TEST_F(LearningModelBindingAPITestGpu, GpuSqueezeNetUnboundOutputs) } // 
Validates that when the input image is the same as the model expects, the binding step is executed correctly. -TEST_F(LearningModelBindingAPITestGpu, ImageBindingDimensions) +static void ImageBindingDimensions() { - LearningModelBinding m_binding = nullptr; + LearningModelBinding learningModelBinding = nullptr; + LearningModel learningModel = nullptr; + LearningModelSession learningModelSession = nullptr; + LearningModelDevice learningModelDevice = nullptr; std::wstring filePath = FileHelpers::GetModulePath() + L"model.onnx"; // load a model with expected input size: 224 x 224 - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::Default)); - EXPECT_NO_THROW(m_model = LearningModel::LoadFromFilePath(filePath)); - EXPECT_TRUE(m_model != nullptr); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); - EXPECT_NO_THROW(m_binding = LearningModelBinding(m_session)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::Default)); + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromFilePath(filePath)); + WINML_EXPECT_TRUE(learningModel != nullptr); + WINML_EXPECT_NO_THROW(learningModelSession = LearningModelSession(learningModel, learningModelDevice)); + WINML_EXPECT_NO_THROW(learningModelBinding = LearningModelBinding(learningModelSession)); // Create input images and execute bind // Test Case 1: both width and height are larger than model expects VideoFrame inputImage1(BitmapPixelFormat::Rgba8, 1000, 1000); ImageFeatureValue inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage1); - EXPECT_NO_THROW(m_binding.Bind(L"data_0", inputTensor)); + WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor)); // Test Case 2: only height is larger, while width is smaller VideoFrame inputImage2(BitmapPixelFormat::Rgba8, 20, 1000); inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage2); -
WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor)); // Test Case 3: only width is larger, while height is smaller VideoFrame inputImage3(BitmapPixelFormat::Rgba8, 1000, 20); inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage3); - EXPECT_NO_THROW(m_binding.Bind(L"data_0", inputTensor)); + WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor)); // Test Case 4: both width and height are smaller than model expects VideoFrame inputImage4(BitmapPixelFormat::Rgba8, 20, 20); inputTensor = ImageFeatureValue::CreateFromVideoFrame(inputImage4); - EXPECT_NO_THROW(m_binding.Bind(L"data_0", inputTensor)); + WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"data_0", inputTensor)); } -TEST_F(LearningModelBindingAPITestGpu, VerifyInvalidBindExceptions) +static void VerifyInvalidBindExceptions() { - EXPECT_NO_THROW(LoadModel(L"zipmap-int64.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"zipmap-int64.onnx", learningModel)); - LearningModelSession session(m_model); + LearningModelSession session(learningModel); LearningModelBinding binding(session); std::vector inputs = { 0.5f, 0.25f, 0.125f }; @@ -384,47 +391,47 @@ TEST_F(LearningModelBindingAPITestGpu, VerifyInvalidBindExceptions) // Bind invalid image as tensorfloat input auto image = FileHelpers::LoadImageFeatureValue(L"227x227.png"); - EXPECT_THROW_SPECIFIC(binding.Bind(L"X", image), winrt::hresult_error, ensureWinmlSizeMismatch); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", image), winrt::hresult_error, ensureWinmlSizeMismatch); // Bind invalid map as tensorfloat input std::unordered_map map; auto abiMap = winrt::single_threaded_map(std::move(map)); - EXPECT_THROW_SPECIFIC(binding.Bind(L"X", abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid sequence as tensorfloat input std::vector sequence; 
auto abiSequence = winrt::single_threaded_vector(std::move(sequence)); - EXPECT_THROW_SPECIFIC(binding.Bind(L"X", abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid tensor size as tensorfloat input auto tensorBoolean = TensorBoolean::Create(); - EXPECT_THROW_SPECIFIC(binding.Bind(L"X", tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid tensor shape as tensorfloat input auto tensorInvalidShape = TensorFloat::Create(std::vector { 2, 3, 4 }); - EXPECT_THROW_SPECIFIC(binding.Bind(L"X", tensorInvalidShape), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"X", tensorInvalidShape), winrt::hresult_error, ensureWinmlInvalidBinding); /* Verify sequence bindings throw correct bind exceptions */ // Bind invalid image as sequence output - EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", image), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", image), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid map as sequence output - EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid sequence as sequence output - EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid tensor as sequence output - EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(binding.Bind(L"Y", 
tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); /* Verify image bindings throw correct bind exceptions */ - // EXPECT_NO_THROW(LoadModel(L"fns-candy.onnx")); + // WINML_EXPECT_NO_THROW(LoadModel(L"fns-candy.onnx")); // LearningModelSession imageSession(m_model); // LearningModelBinding imageBinding(imageSession); @@ -432,74 +439,77 @@ TEST_F(LearningModelBindingAPITestGpu, VerifyInvalidBindExceptions) // auto inputName = m_model.InputFeatures().First().Current().Name(); // // Bind invalid map as image input - // EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); + // WINML_EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); // // Bind invalid sequence as image input - // EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); + // WINML_EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); // // Bind invalid tensor type as image input - // EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); + // WINML_EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); // // Bind invalid tensor size as image input // auto tensorFloat = TensorFloat::Create(std::vector { 1, 1, 100, 100 }); - // EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, tensorFloat), winrt::hresult_error, ensureWinmlInvalidBinding); + // WINML_EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, tensorFloat), winrt::hresult_error, ensureWinmlInvalidBinding); // // Bind invalid tensor shape as image input - // EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, tensorInvalidShape), winrt::hresult_error, ensureWinmlInvalidBinding); + // WINML_EXPECT_THROW_SPECIFIC(imageBinding.Bind(inputName, tensorInvalidShape), winrt::hresult_error, 
ensureWinmlInvalidBinding); /* Verify map bindings throw correct bind exceptions */ - EXPECT_NO_THROW(LoadModel(L"dictvectorizer-int64.onnx")); + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"dictvectorizer-int64.onnx", learningModel)); - LearningModelSession mapSession(m_model); + LearningModelSession mapSession(learningModel); LearningModelBinding mapBinding(mapSession); - auto inputName = m_model.InputFeatures().First().Current().Name(); + auto inputName = learningModel.InputFeatures().First().Current().Name(); // Bind invalid image as image input auto smallImage = FileHelpers::LoadImageFeatureValue(L"100x100.png"); - EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, smallImage), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, smallImage), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid map as image input - EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, abiMap), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid sequence as image input - EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, abiSequence), winrt::hresult_error, ensureWinmlInvalidBinding); // Bind invalid tensor type as image input - EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); + WINML_EXPECT_THROW_SPECIFIC(mapBinding.Bind(inputName, tensorBoolean), winrt::hresult_error, ensureWinmlInvalidBinding); } // Verify that it throws an error when binding an invalid name. 
-TEST_F(LearningModelBindingAPITestGpu, BindInvalidInputName) +static void BindInvalidInputName() { - LearningModelBinding m_binding = nullptr; + LearningModel learningModel = nullptr; + LearningModelBinding learningModelBinding = nullptr; + LearningModelDevice learningModelDevice = nullptr; + LearningModelSession learningModelSession = nullptr; std::wstring modelPath = FileHelpers::GetModulePath() + L"Add_ImageNet1920.onnx"; - EXPECT_NO_THROW(m_model = LearningModel::LoadFromFilePath(modelPath)); - EXPECT_TRUE(m_model != nullptr); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::Default)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); - EXPECT_NO_THROW(m_binding = LearningModelBinding(m_session)); + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromFilePath(modelPath)); + WINML_EXPECT_TRUE(learningModel != nullptr); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::Default)); + WINML_EXPECT_NO_THROW(learningModelSession = LearningModelSession(learningModel, learningModelDevice)); + WINML_EXPECT_NO_THROW(learningModelBinding = LearningModelBinding(learningModelSession)); VideoFrame iuputImage(BitmapPixelFormat::Rgba8, 1920, 1080); ImageFeatureValue inputTensor = ImageFeatureValue::CreateFromVideoFrame(iuputImage); - auto first = m_model.InputFeatures().First(); + auto first = learningModel.InputFeatures().First(); std::wstring testInvalidName = L"0"; // Verify that testInvalidName is not in model's InputFeatures while (first.HasCurrent()) { - EXPECT_NE(testInvalidName, first.Current().Name()); + WINML_EXPECT_NOT_EQUAL(testInvalidName, first.Current().Name()); first.MoveNext(); } // Bind inputTensor to a valid input name - EXPECT_NO_THROW(m_binding.Bind(L"input_39:0", inputTensor)); + WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"input_39:0", inputTensor)); // Bind inputTensor to an invalid input name - EXPECT_THROW_SPECIFIC(m_binding.Bind(testInvalidName, 
inputTensor), + WINML_EXPECT_THROW_SPECIFIC(learningModelBinding.Bind(testInvalidName, inputTensor), winrt::hresult_error, [](const winrt::hresult_error& e) -> bool { @@ -507,15 +517,18 @@ TEST_F(LearningModelBindingAPITestGpu, BindInvalidInputName) }); } -TEST_F(LearningModelBindingAPITest, VerifyOutputAfterEvaluateAsyncCalledTwice) +static void VerifyOutputAfterEvaluateAsyncCalledTwice() { - LearningModelBinding m_binding = nullptr; + LearningModel learningModel = nullptr; + LearningModelBinding learningModelBinding = nullptr; + LearningModelDevice learningModelDevice = nullptr; + LearningModelSession learningModelSession = nullptr; std::wstring filePath = FileHelpers::GetModulePath() + L"relu.onnx"; - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::Default)); - EXPECT_NO_THROW(m_model = LearningModel::LoadFromFilePath(filePath)); - EXPECT_TRUE(m_model != nullptr); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); - EXPECT_NO_THROW(m_binding = LearningModelBinding(m_session)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::Default)); + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromFilePath(filePath)); + WINML_EXPECT_TRUE(learningModel != nullptr); + WINML_EXPECT_NO_THROW(learningModelSession = LearningModelSession(learningModel, learningModelDevice)); + WINML_EXPECT_NO_THROW(learningModelBinding = LearningModelBinding(learningModelSession)); auto inputShape = std::vector{ 5 }; auto inputData1 = std::vector{ -50.f, -25.f, 0.f, 25.f, 50.f }; @@ -530,22 +543,22 @@ TEST_F(LearningModelBindingAPITest, VerifyOutputAfterEvaluateAsyncCalledTwice) inputShape, single_threaded_vector(std::move(inputData2)).GetView()); - EXPECT_NO_THROW(m_binding.Bind(L"X", inputValue1)); + WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"X", inputValue1)); auto outputValue = TensorFloat::Create(); - EXPECT_NO_THROW(m_binding.Bind(L"Y", outputValue)); + 
WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"Y", outputValue)); - EXPECT_NO_THROW(m_session.Evaluate(m_binding, L"")); + WINML_EXPECT_NO_THROW(learningModelSession.Evaluate(learningModelBinding, L"")); auto buffer1 = outputValue.GetAsVectorView(); - EXPECT_TRUE(buffer1 != nullptr); + WINML_EXPECT_TRUE(buffer1 != nullptr); // The second evaluation // If we don't bind output again, the output value will not change - EXPECT_NO_THROW(m_binding.Bind(L"X", inputValue2)); - EXPECT_NO_THROW(m_session.Evaluate(m_binding, L"")); + WINML_EXPECT_NO_THROW(learningModelBinding.Bind(L"X", inputValue2)); + WINML_EXPECT_NO_THROW(learningModelSession.Evaluate(learningModelBinding, L"")); auto buffer2 = outputValue.GetAsVectorView(); - EXPECT_EQ(buffer1.Size(), buffer2.Size()); + WINML_EXPECT_EQUAL(buffer1.Size(), buffer2.Size()); bool isSame = true; for (uint32_t i = 0; i < buffer1.Size(); ++i) { @@ -555,7 +568,7 @@ TEST_F(LearningModelBindingAPITest, VerifyOutputAfterEvaluateAsyncCalledTwice) break; } } - EXPECT_FALSE(isSame); + WINML_EXPECT_FALSE(isSame); } static VideoFrame CreateVideoFrame(const wchar_t* path) @@ -567,7 +580,7 @@ static VideoFrame CreateVideoFrame(const wchar_t* path) return VideoFrame::CreateWithSoftwareBitmap(softwareBitmap); } -TEST_F(LearningModelBindingAPITest, VerifyOutputAfterImageBindCalledTwice) +static void VerifyOutputAfterImageBindCalledTwice() { std::wstring fullModelPath = FileHelpers::GetModulePath() + L"model.onnx"; std::wstring fullImagePath1 = FileHelpers::GetModulePath() + L"kitten_224.png"; @@ -575,9 +588,9 @@ TEST_F(LearningModelBindingAPITest, VerifyOutputAfterImageBindCalledTwice) // winml model creation LearningModel model = nullptr; - EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); + WINML_EXPECT_NO_THROW(model = LearningModel::LoadFromFilePath(fullModelPath)); LearningModelSession modelSession = nullptr; - EXPECT_NO_THROW(modelSession = LearningModelSession(model, 
LearningModelDevice(LearningModelDeviceKind::Default))); + WINML_EXPECT_NO_THROW(modelSession = LearningModelSession(model, LearningModelDevice(LearningModelDeviceKind::Default))); LearningModelBinding modelBinding(modelSession); // create the tensor for the actual output @@ -587,8 +600,8 @@ TEST_F(LearningModelBindingAPITest, VerifyOutputAfterImageBindCalledTwice) // Bind image 1 and evaluate auto frame = CreateVideoFrame(fullImagePath1.c_str()); auto imageTensor = ImageFeatureValue::CreateFromVideoFrame(frame); - EXPECT_NO_THROW(modelBinding.Bind(L"data_0", imageTensor)); - EXPECT_NO_THROW(modelSession.Evaluate(modelBinding, L"")); + WINML_EXPECT_NO_THROW(modelBinding.Bind(L"data_0", imageTensor)); + WINML_EXPECT_NO_THROW(modelSession.Evaluate(modelBinding, L"")); // Store 1st result auto outputVectorView1 = output.GetAsVectorView(); @@ -598,13 +611,13 @@ TEST_F(LearningModelBindingAPITest, VerifyOutputAfterImageBindCalledTwice) // The expected result is that the videoframe will be re-tensorized at bind auto frame2 = CreateVideoFrame(fullImagePath2.c_str()); frame2.CopyToAsync(frame).get(); - EXPECT_NO_THROW(modelBinding.Bind(L"data_0", imageTensor)); - EXPECT_NO_THROW(modelSession.Evaluate(modelBinding, L"")); + WINML_EXPECT_NO_THROW(modelBinding.Bind(L"data_0", imageTensor)); + WINML_EXPECT_NO_THROW(modelSession.Evaluate(modelBinding, L"")); // Store 2nd result auto outputVectorView2 = output.GetAsVectorView(); - EXPECT_EQ(outputVectorView1.Size(), outputVectorView2.Size()); + WINML_EXPECT_EQUAL(outputVectorView1.Size(), outputVectorView2.Size()); bool isSame = true; for (uint32_t i = 0; i < outputVectorView1.Size(); ++i) { @@ -614,5 +627,32 @@ TEST_F(LearningModelBindingAPITest, VerifyOutputAfterImageBindCalledTwice) break; } } - EXPECT_FALSE(isSame); + WINML_EXPECT_FALSE(isSame); } + +const LearningModelBindingAPITestApi& getapi() { + static constexpr LearningModelBindingAPITestApi api = + { + LearningModelBindingAPITestSetup, + 
LearningModelBindingAPITestGpuSetup, + CpuSqueezeNet, + CpuSqueezeNetEmptyOutputs, + CpuSqueezeNetUnboundOutputs, + CpuSqueezeNetBindInputTensorAsInspectable, + CastMapInt64, + DictionaryVectorizerMapInt64, + DictionaryVectorizerMapString, + ZipMapInt64, + ZipMapInt64Unbound, + ZipMapString, + GpuSqueezeNet, + GpuSqueezeNetEmptyOutputs, + GpuSqueezeNetUnboundOutputs, + ImageBindingDimensions, + VerifyInvalidBindExceptions, + BindInvalidInputName, + VerifyOutputAfterEvaluateAsyncCalledTwice, + VerifyOutputAfterImageBindCalledTwice + }; + return api; +} \ No newline at end of file diff --git a/winml/test/api/LearningModelBindingAPITest.h b/winml/test/api/LearningModelBindingAPITest.h new file mode 100644 index 0000000000000..c93be87c3cb60 --- /dev/null +++ b/winml/test/api/LearningModelBindingAPITest.h @@ -0,0 +1,49 @@ +#include "test.h" + +struct LearningModelBindingAPITestApi { + SetupTest LearningModelBindingAPITestSetup; + SetupTest LearningModelBindingAPITestGpuSetup; + VoidTest CpuSqueezeNet; + VoidTest CpuSqueezeNetEmptyOutputs; + VoidTest CpuSqueezeNetUnboundOutputs; + VoidTest CpuSqueezeNetBindInputTensorAsInspectable; + VoidTest CastMapInt64; + VoidTest DictionaryVectorizerMapInt64; + VoidTest DictionaryVectorizerMapString; + VoidTest ZipMapInt64; + VoidTest ZipMapInt64Unbound; + VoidTest ZipMapString; + VoidTest GpuSqueezeNet; + VoidTest GpuSqueezeNetEmptyOutputs; + VoidTest GpuSqueezeNetUnboundOutputs; + VoidTest ImageBindingDimensions; + VoidTest VerifyInvalidBindExceptions; + VoidTest BindInvalidInputName; + VoidTest VerifyOutputAfterEvaluateAsyncCalledTwice; + VoidTest VerifyOutputAfterImageBindCalledTwice; +}; +const LearningModelBindingAPITestApi& getapi(); + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelBindingAPITest, LearningModelBindingAPITestSetup) +WINML_TEST(LearningModelBindingAPITest, CpuSqueezeNet) +WINML_TEST(LearningModelBindingAPITest, CpuSqueezeNetEmptyOutputs) +WINML_TEST(LearningModelBindingAPITest, CpuSqueezeNetUnboundOutputs) 
+WINML_TEST(LearningModelBindingAPITest, CpuSqueezeNetBindInputTensorAsInspectable) +WINML_TEST(LearningModelBindingAPITest, CastMapInt64) +WINML_TEST(LearningModelBindingAPITest, DictionaryVectorizerMapInt64) +WINML_TEST(LearningModelBindingAPITest, DictionaryVectorizerMapString) +WINML_TEST(LearningModelBindingAPITest, ZipMapInt64) +WINML_TEST(LearningModelBindingAPITest, ZipMapInt64Unbound) +WINML_TEST(LearningModelBindingAPITest, ZipMapString) +WINML_TEST(LearningModelBindingAPITest, VerifyOutputAfterEvaluateAsyncCalledTwice) +WINML_TEST(LearningModelBindingAPITest, VerifyOutputAfterImageBindCalledTwice) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelBindingAPITestGpu, LearningModelBindingAPITestGpuSetup) +WINML_TEST(LearningModelBindingAPITestGpu, GpuSqueezeNet) +WINML_TEST(LearningModelBindingAPITestGpu, GpuSqueezeNetEmptyOutputs) +WINML_TEST(LearningModelBindingAPITestGpu, GpuSqueezeNetUnboundOutputs) +WINML_TEST(LearningModelBindingAPITestGpu, ImageBindingDimensions) +WINML_TEST(LearningModelBindingAPITestGpu, VerifyInvalidBindExceptions) +WINML_TEST(LearningModelBindingAPITestGpu, BindInvalidInputName) +WINML_TEST_CLASS_END() \ No newline at end of file diff --git a/winml/test/api/LearningModelSessionAPITest.cpp b/winml/test/api/LearningModelSessionAPITest.cpp index bc6494a3a71ac..6ee0b3d57f751 100644 --- a/winml/test/api/LearningModelSessionAPITest.cpp +++ b/winml/test/api/LearningModelSessionAPITest.cpp @@ -1,6 +1,6 @@ #include "testPch.h" #include "APITest.h" - +#include "LearningModelSessionAPITest.h" #include "winrt/Windows.Storage.h" #include "DeviceHelpers.h" @@ -16,144 +16,151 @@ using namespace winrt::Windows::Foundation::Collections; using winrt::Windows::Foundation::IPropertyValue; -class LearningModelSessionAPITests : public APITest -{}; +static void LearningModelSessionAPITestSetup() { + init_apartment(); +} -class LearningModelSessionAPITestsGpu : public APITest -{ -protected: - void SetUp() override - { - GPUTEST - 
} -}; +static void LearningModelSessionAPITestGpuSetup() { + GPUTEST; + init_apartment(); +} -class LearningModelSessionAPITestsSkipEdgeCore : public LearningModelSessionAPITestsGpu -{ -protected: - void SetUp() override - { - LearningModelSessionAPITestsGpu::SetUp(); - SKIP_EDGECORE - } -}; +static void LearningModelSessionAPITestsSkipEdgeCoreSetup() { + LearningModelSessionAPITestGpuSetup(); + SKIP_EDGECORE +} -TEST_F(LearningModelSessionAPITests, CreateSessionDeviceDefault) +static void CreateSessionDeviceDefault() { - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::Default)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::Default)); + WINML_EXPECT_NO_THROW(LearningModelSession(learningModel, learningModelDevice)); } -TEST_F(LearningModelSessionAPITests, CreateSessionDeviceCpu) +static void CreateSessionDeviceCpu() { - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::Cpu)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::Cpu)); + WINML_EXPECT_NO_THROW(LearningModelSession(learningModel, learningModelDevice)); // for the CPU device, make sure that we get back NULL and 0 for any device properties - EXPECT_FALSE(m_device.Direct3D11Device()); + WINML_EXPECT_EQUAL(learningModelDevice.Direct3D11Device(), nullptr); LARGE_INTEGER id; - id.QuadPart = GetAdapterIdQuadPart(); - 
EXPECT_EQ(id.LowPart, static_cast(0)); - EXPECT_EQ(id.HighPart, 0); + id.QuadPart = APITest::GetAdapterIdQuadPart(learningModelDevice); + WINML_EXPECT_EQUAL(id.LowPart, static_cast(0)); + WINML_EXPECT_EQUAL(id.HighPart, 0); } -TEST_F(LearningModelSessionAPITests, CreateSessionWithModelLoadedFromStream) +static void CreateSessionWithModelLoadedFromStream() { + LearningModel learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; std::wstring path = FileHelpers::GetModulePath() + L"model.onnx"; auto storageFile = winrt::Windows::Storage::StorageFile::GetFileFromPathAsync(path).get(); - EXPECT_NO_THROW(m_model = LearningModel::LoadFromStream(storageFile)); + WINML_EXPECT_NO_THROW(learningModel = LearningModel::LoadFromStream(storageFile)); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::Default)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::Default)); + WINML_EXPECT_NO_THROW(LearningModelSession(learningModel, learningModelDevice)); } -TEST_F(LearningModelSessionAPITestsGpu, CreateSessionDeviceDirectX) +static void CreateSessionDeviceDirectX() { - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::DirectX)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::DirectX)); + WINML_EXPECT_NO_THROW(LearningModelSession(learningModel, learningModelDevice)); } -TEST_F(LearningModelSessionAPITestsGpu, CreateSessionDeviceDirectXHighPerformance) +static void CreateSessionDeviceDirectXHighPerformance() { - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel 
learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::DirectXHighPerformance)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::DirectXHighPerformance)); + WINML_EXPECT_NO_THROW(LearningModelSession(learningModel, learningModelDevice)); } -TEST_F(LearningModelSessionAPITestsGpu, CreateSessionDeviceDirectXMinimumPower) +static void CreateSessionDeviceDirectXMinimumPower() { - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); - EXPECT_NO_THROW(m_device = LearningModelDevice(LearningModelDeviceKind::DirectXMinPower)); - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); + WINML_EXPECT_NO_THROW(learningModelDevice = LearningModelDevice(LearningModelDeviceKind::DirectXMinPower)); + WINML_EXPECT_NO_THROW(LearningModelSession(learningModel, learningModelDevice)); } -TEST_F(LearningModelSessionAPITestsSkipEdgeCore, AdapterIdAndDevice) -{ - EXPECT_NO_THROW(LoadModel(L"model.onnx")); +static void AdapterIdAndDevice() { + LearningModel learningModel = nullptr; + LearningModelDevice learningModelDevice = nullptr; + LearningModelSession learningModelSession = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); com_ptr factory; - EXPECT_HRESULT_SUCCEEDED(CreateDXGIFactory1(__uuidof(IDXGIFactory6), factory.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(CreateDXGIFactory1(__uuidof(IDXGIFactory6), factory.put_void())); com_ptr adapter; - m_device = LearningModelDevice(LearningModelDeviceKind::DirectX); - EXPECT_HRESULT_SUCCEEDED(factory->EnumAdapters(0, adapter.put())); + 
learningModelDevice = LearningModelDevice(LearningModelDeviceKind::DirectX); + WINML_EXPECT_HRESULT_SUCCEEDED(factory->EnumAdapters(0, adapter.put())); DXGI_ADAPTER_DESC desc; - EXPECT_HRESULT_SUCCEEDED(adapter->GetDesc(&desc)); + WINML_EXPECT_HRESULT_SUCCEEDED(adapter->GetDesc(&desc)); LARGE_INTEGER id; - id.QuadPart = GetAdapterIdQuadPart(); - EXPECT_EQ(desc.AdapterLuid.LowPart, id.LowPart); - EXPECT_EQ(desc.AdapterLuid.HighPart, id.HighPart); - EXPECT_TRUE(m_device.Direct3D11Device() != nullptr); + id.QuadPart = APITest::GetAdapterIdQuadPart(learningModelDevice); + WINML_EXPECT_EQUAL(desc.AdapterLuid.LowPart, id.LowPart); + WINML_EXPECT_EQUAL(desc.AdapterLuid.HighPart, id.HighPart); + WINML_EXPECT_TRUE(learningModelDevice.Direct3D11Device() != nullptr); - m_device = LearningModelDevice(LearningModelDeviceKind::DirectXHighPerformance); + learningModelDevice = LearningModelDevice(LearningModelDeviceKind::DirectXHighPerformance); adapter = nullptr; - EXPECT_HRESULT_SUCCEEDED(factory->EnumAdapterByGpuPreference(0, DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE, __uuidof(IDXGIAdapter), adapter.put_void())); - EXPECT_HRESULT_SUCCEEDED(adapter->GetDesc(&desc)); - id.QuadPart = GetAdapterIdQuadPart(); - EXPECT_EQ(desc.AdapterLuid.LowPart, id.LowPart); - EXPECT_EQ(desc.AdapterLuid.HighPart, id.HighPart); - EXPECT_TRUE(m_device.Direct3D11Device() != nullptr); + WINML_EXPECT_HRESULT_SUCCEEDED(factory->EnumAdapterByGpuPreference(0, DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE, __uuidof(IDXGIAdapter), adapter.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(adapter->GetDesc(&desc)); + id.QuadPart = APITest::GetAdapterIdQuadPart(learningModelDevice); + WINML_EXPECT_EQUAL(desc.AdapterLuid.LowPart, id.LowPart); + WINML_EXPECT_EQUAL(desc.AdapterLuid.HighPart, id.HighPart); + WINML_EXPECT_TRUE(learningModelDevice.Direct3D11Device() != nullptr); adapter = nullptr; - m_device = LearningModelDevice(LearningModelDeviceKind::DirectXMinPower); - 
EXPECT_HRESULT_SUCCEEDED(factory->EnumAdapterByGpuPreference(0, DXGI_GPU_PREFERENCE_MINIMUM_POWER, __uuidof(IDXGIAdapter), adapter.put_void())); - EXPECT_HRESULT_SUCCEEDED(adapter->GetDesc(&desc)); - id.QuadPart = GetAdapterIdQuadPart(); - EXPECT_EQ(desc.AdapterLuid.LowPart, id.LowPart); - EXPECT_EQ(desc.AdapterLuid.HighPart, id.HighPart); - EXPECT_TRUE(m_device.Direct3D11Device() != nullptr); - - EXPECT_NO_THROW(m_session = LearningModelSession(m_model, m_device)); - EXPECT_EQ(m_session.Device().AdapterId(), m_device.AdapterId()); + learningModelDevice = LearningModelDevice(LearningModelDeviceKind::DirectXMinPower); + WINML_EXPECT_HRESULT_SUCCEEDED(factory->EnumAdapterByGpuPreference(0, DXGI_GPU_PREFERENCE_MINIMUM_POWER, __uuidof(IDXGIAdapter), adapter.put_void())); + WINML_EXPECT_HRESULT_SUCCEEDED(adapter->GetDesc(&desc)); + id.QuadPart = APITest::GetAdapterIdQuadPart(learningModelDevice); + WINML_EXPECT_EQUAL(desc.AdapterLuid.LowPart, id.LowPart); + WINML_EXPECT_EQUAL(desc.AdapterLuid.HighPart, id.HighPart); + WINML_EXPECT_TRUE(learningModelDevice.Direct3D11Device() != nullptr); + + WINML_EXPECT_NO_THROW(learningModelSession = LearningModelSession(learningModel, learningModelDevice)); + WINML_EXPECT_EQUAL(learningModelSession.Device().AdapterId(), learningModelDevice.AdapterId()); } -TEST_F(LearningModelSessionAPITests, EvaluateFeatures) +static void EvaluateFeatures() { std::vector shape = { 4 }; std::vector data = { L"one", L"two", L"three", L"four" }; // create from buffer auto tensor = TensorString::CreateFromArray(shape, data); - EXPECT_EQ(tensor.GetAsVectorView().Size(), data.size()); - EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); + WINML_EXPECT_EQUAL(tensor.GetAsVectorView().Size(), data.size()); + WINML_EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); // create from vector view auto dataCopy = data; tensor = TensorString::CreateFromIterable( shape, 
winrt::single_threaded_vector(std::move(dataCopy)).GetView()); - EXPECT_EQ(tensor.GetAsVectorView().Size(), data.size()); - EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); + WINML_EXPECT_EQUAL(tensor.GetAsVectorView().Size(), data.size()); + WINML_EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); - EXPECT_NO_THROW(LoadModel(L"id-tensor-string.onnx")); - LearningModelSession session(m_model); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"id-tensor-string.onnx", learningModel)); + LearningModelSession session(learningModel); auto outputTensor = TensorString::Create(); @@ -164,29 +171,30 @@ TEST_F(LearningModelSessionAPITests, EvaluateFeatures) session.EvaluateFeatures(featureswinrtmap, L"0"); // verify identity model round-trip works - EXPECT_EQ(outputTensor.GetAsVectorView().Size(), data.size()); - EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(outputTensor.GetAsVectorView()))); + WINML_EXPECT_EQUAL(outputTensor.GetAsVectorView().Size(), data.size()); + WINML_EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(outputTensor.GetAsVectorView()))); } -TEST_F(LearningModelSessionAPITests, EvaluateFeaturesAsync) +static void EvaluateFeaturesAsync() { std::vector shape = { 4 }; std::vector data = { L"one", L"two", L"three", L"four" }; // create from buffer auto tensor = TensorString::CreateFromArray(shape, data); - EXPECT_EQ(tensor.GetAsVectorView().Size(), data.size()); - EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); + WINML_EXPECT_EQUAL(tensor.GetAsVectorView().Size(), data.size()); + WINML_EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); // create from vector view auto dataCopy = data; tensor = TensorString::CreateFromIterable( shape, winrt::single_threaded_vector(std::move(dataCopy)).GetView()); - EXPECT_EQ(tensor.GetAsVectorView().Size(), data.size()); - 
EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); + WINML_EXPECT_EQUAL(tensor.GetAsVectorView().Size(), data.size()); + WINML_EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(tensor.GetAsVectorView()))); - EXPECT_NO_THROW(LoadModel(L"id-tensor-string.onnx")); - LearningModelSession session(m_model); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"id-tensor-string.onnx", learningModel)); + LearningModelSession session(learningModel); auto outputTensor = TensorString::Create(shape); @@ -197,37 +205,39 @@ TEST_F(LearningModelSessionAPITests, EvaluateFeaturesAsync) session.EvaluateFeaturesAsync(featureswinrtmap, L"0").get(); // verify identity model round-trip works - EXPECT_EQ(outputTensor.GetAsVectorView().Size(), data.size()); - EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(outputTensor.GetAsVectorView()))); + WINML_EXPECT_EQUAL(outputTensor.GetAsVectorView().Size(), data.size()); + WINML_EXPECT_TRUE(std::equal(data.cbegin(), data.cend(), begin(outputTensor.GetAsVectorView()))); } -TEST_F(LearningModelSessionAPITests, EvaluationProperties) +static void EvaluationProperties() { // load a model - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); // create a session - m_session = LearningModelSession(m_model); + LearningModelSession learningModelSession = nullptr; + learningModelSession = LearningModelSession(learningModel); // set a property auto value = winrt::Windows::Foundation::PropertyValue::CreateBoolean(true); - m_session.EvaluationProperties().Insert(L"propName1", value); + learningModelSession.EvaluationProperties().Insert(L"propName1", value); // get the property and make sure it's there with the right value - auto value2 = m_session.EvaluationProperties().Lookup(L"propName1"); - EXPECT_EQ(value2.as().GetBoolean(), true); + auto value2 = 
learningModelSession.EvaluationProperties().Lookup(L"propName1"); + WINML_EXPECT_EQUAL(value2.as().GetBoolean(), true); } static LearningModelSession CreateSession(LearningModel model) { LearningModelDevice device(nullptr); - EXPECT_NO_THROW(device = LearningModelDevice(LearningModelDeviceKind::DirectX)); + WINML_EXPECT_NO_THROW(device = LearningModelDevice(LearningModelDeviceKind::DirectX)); LearningModelSession session(nullptr); if (DeviceHelpers::IsFloat16Supported(device)) { - EXPECT_NO_THROW(session = LearningModelSession(model, device)); + WINML_EXPECT_NO_THROW(session = LearningModelSession(model, device)); } else { - EXPECT_THROW_SPECIFIC( + WINML_EXPECT_THROW_SPECIFIC( session = LearningModelSession(model, device), winrt::hresult_error, [](const winrt::hresult_error& e) -> bool @@ -239,26 +249,28 @@ static LearningModelSession CreateSession(LearningModel model) return session; } -TEST_F(LearningModelSessionAPITestsGpu, CreateSessionWithCastToFloat16InModel) +static void CreateSessionWithCastToFloat16InModel() { // load a model - EXPECT_NO_THROW(LoadModel(L"fp16-truncate-with-cast.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"fp16-truncate-with-cast.onnx", learningModel)); - CreateSession(m_model); + CreateSession(learningModel); } -TEST_F(LearningModelSessionAPITestsGpu, DISABLED_CreateSessionWithFloat16InitializersInModel) +static void DISABLED_CreateSessionWithFloat16InitializersInModel() { // Disabled due to https://microsoft.visualstudio.com/DefaultCollection/OS/_workitems/edit/21624720: // Model fails to resolve due to ORT using incorrect IR version within partition // load a model - EXPECT_NO_THROW(LoadModel(L"fp16-initializer.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"fp16-initializer.onnx", learningModel)); - CreateSession(m_model); + CreateSession(learningModel); } -static void EvaluateSessionAndCloseModel( +static void 
EvaluateSessionAndCloseModelHelper( LearningModelDeviceKind kind, bool close_model_on_session_creation) { @@ -275,7 +287,7 @@ static void EvaluateSessionAndCloseModel( // ensure you can create a session from the model LearningModelSession session(nullptr); - EXPECT_NO_THROW(session = LearningModelSession(model, device, options)); + WINML_EXPECT_NO_THROW(session = LearningModelSession(model, device, options)); std::vector input(1000); std::iota(std::begin(input), std::end(input), 0.0f); @@ -284,12 +296,12 @@ static void EvaluateSessionAndCloseModel( binding.Bind(L"input", tensor_input); LearningModelEvaluationResult result(nullptr); - EXPECT_NO_THROW(result = session.Evaluate(binding, L"")); + WINML_EXPECT_NO_THROW(result = session.Evaluate(binding, L"")); if (close_model_on_session_creation) { // ensure that the model has been closed - EXPECT_THROW_SPECIFIC( + WINML_EXPECT_THROW_SPECIFIC( LearningModelSession(model, device, options), winrt::hresult_error, [](const winrt::hresult_error& e) -> bool @@ -299,19 +311,20 @@ static void EvaluateSessionAndCloseModel( } else { - EXPECT_NO_THROW(LearningModelSession(model, device, options)); + WINML_EXPECT_NO_THROW(LearningModelSession(model, device, options)); } } -TEST_F(LearningModelSessionAPITests, EvaluateSessionAndCloseModel) +static void EvaluateSessionAndCloseModel() { - EXPECT_NO_THROW(::EvaluateSessionAndCloseModel(LearningModelDeviceKind::Cpu, true)); - EXPECT_NO_THROW(::EvaluateSessionAndCloseModel(LearningModelDeviceKind::Cpu, false)); + WINML_EXPECT_NO_THROW(::EvaluateSessionAndCloseModelHelper(LearningModelDeviceKind::Cpu, true)); + WINML_EXPECT_NO_THROW(::EvaluateSessionAndCloseModelHelper(LearningModelDeviceKind::Cpu, false)); } -TEST_F(LearningModelSessionAPITests, CloseSession) +static void CloseSession() { - EXPECT_NO_THROW(LoadModel(L"model.onnx")); + LearningModel learningModel = nullptr; + WINML_EXPECT_NO_THROW(APITest::LoadModel(L"model.onnx", learningModel)); LearningModelSession session = nullptr; 
/* @@ -329,7 +342,7 @@ TEST_F(LearningModelSessionAPITests, CloseSession) SIZE_T afterSessionCloseWorkingSetSize = 0; bool getProcessMemoryInfoSuccess = false; */ - EXPECT_NO_THROW(session = LearningModelSession(m_model)); + WINML_EXPECT_NO_THROW(session = LearningModelSession(learningModel)); /* // Get the current process memory info after session creation. @@ -341,7 +354,7 @@ TEST_F(LearningModelSessionAPITests, CloseSession) beforeSessionCloseWorkingSetSize = pmc.WorkingSetSize; pmc = { 0 }; */ - EXPECT_NO_THROW(session.Close()); + WINML_EXPECT_NO_THROW(session.Close()); /* Bug 23659026: Working set difference tolerance is too tight for LearningModelSessionAPITests::CloseSession @@ -367,17 +380,41 @@ TEST_F(LearningModelSessionAPITests, CloseSession) */ // verify that model still has metadata info after session close - std::wstring author(m_model.Author()); - EXPECT_EQ(author, L"onnx-caffe2"); + std::wstring author(learningModel.Author()); + WINML_EXPECT_EQUAL(author, L"onnx-caffe2"); // verify that session throws RO_E_CLOSED error std::vector input(1 * 3 * 224 * 224, 0); std::vector shape = { 1, 3, 224, 224 }; auto tensor_input = TensorFloat::CreateFromShapeArrayAndDataArray(shape, input); - EXPECT_THROW_SPECIFIC(LearningModelBinding binding(session), + WINML_EXPECT_THROW_SPECIFIC(LearningModelBinding binding(session), winrt::hresult_error, [](const winrt::hresult_error &e) -> bool { return e.code() == RO_E_CLOSED; }); } + +const LearningModelSesssionAPITestApi& getapi() { + static constexpr LearningModelSesssionAPITestApi api = + { + LearningModelSessionAPITestSetup, + LearningModelSessionAPITestGpuSetup, + LearningModelSessionAPITestsSkipEdgeCoreSetup, + CreateSessionDeviceDefault, + CreateSessionDeviceCpu, + CreateSessionWithModelLoadedFromStream, + CreateSessionDeviceDirectX, + CreateSessionDeviceDirectXHighPerformance, + CreateSessionDeviceDirectXMinimumPower, + AdapterIdAndDevice, + EvaluateFeatures, + EvaluateFeaturesAsync, + EvaluationProperties, + 
CreateSessionWithCastToFloat16InModel, + DISABLED_CreateSessionWithFloat16InitializersInModel, + EvaluateSessionAndCloseModel, + CloseSession, + }; + return api; +} \ No newline at end of file diff --git a/winml/test/api/LearningModelSessionAPITest.h b/winml/test/api/LearningModelSessionAPITest.h new file mode 100644 index 0000000000000..b98cb56f9fbd7 --- /dev/null +++ b/winml/test/api/LearningModelSessionAPITest.h @@ -0,0 +1,44 @@ +#include "test.h" + +struct LearningModelSesssionAPITestApi { + SetupTest LearningModelSessionAPITestSetup; + SetupTest LearningModelSessionAPITestGpuSetup; + SetupTest LearningModelSessionAPITestsSkipEdgeCoreSetup; + VoidTest CreateSessionDeviceDefault; + VoidTest CreateSessionDeviceCpu; + VoidTest CreateSessionWithModelLoadedFromStream; + VoidTest CreateSessionDeviceDirectX; + VoidTest CreateSessionDeviceDirectXHighPerformance; + VoidTest CreateSessionDeviceDirectXMinimumPower; + VoidTest AdapterIdAndDevice; + VoidTest EvaluateFeatures; + VoidTest EvaluateFeaturesAsync; + VoidTest EvaluationProperties; + VoidTest CreateSessionWithCastToFloat16InModel; + VoidTest DISABLED_CreateSessionWithFloat16InitializersInModel; + VoidTest EvaluateSessionAndCloseModel; + VoidTest CloseSession; +}; +const LearningModelSesssionAPITestApi& getapi(); + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelSessionAPITest, LearningModelSessionAPITestSetup) +WINML_TEST(LearningModelSessionAPITest, CreateSessionDeviceDefault) +WINML_TEST(LearningModelSessionAPITest,CreateSessionDeviceCpu) +WINML_TEST(LearningModelSessionAPITest,CreateSessionWithModelLoadedFromStream) +WINML_TEST(LearningModelSessionAPITest,EvaluateFeatures) +WINML_TEST(LearningModelSessionAPITest,EvaluateFeaturesAsync) +WINML_TEST(LearningModelSessionAPITest,EvaluationProperties) +WINML_TEST(LearningModelSessionAPITest,EvaluateSessionAndCloseModel) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelSessionAPITestGpu, LearningModelSessionAPITestGpuSetup) 
+WINML_TEST(LearningModelSessionAPITestGpu, CreateSessionDeviceDirectX) +WINML_TEST(LearningModelSessionAPITestGpu, CreateSessionDeviceDirectXHighPerformance) +WINML_TEST(LearningModelSessionAPITestGpu, CreateSessionDeviceDirectXMinimumPower) +WINML_TEST(LearningModelSessionAPITestGpu, CreateSessionWithCastToFloat16InModel) +WINML_TEST(LearningModelSessionAPITestGpu, DISABLED_CreateSessionWithFloat16InitializersInModel) +WINML_TEST_CLASS_END() + +WINML_TEST_CLASS_BEGIN_WITH_SETUP(LearningModelSessionAPITestsSkipEdgeCore, LearningModelSessionAPITestsSkipEdgeCoreSetup) +WINML_TEST(LearningModelSessionAPITestsSkipEdgeCore, AdapterIdAndDevice) +WINML_TEST_CLASS_END() \ No newline at end of file diff --git a/winml/test/common/googleTestMacros.h b/winml/test/common/googleTestMacros.h index 55fb2f7f740fe..bf9b583824081 100644 --- a/winml/test/common/googleTestMacros.h +++ b/winml/test/common/googleTestMacros.h @@ -35,12 +35,14 @@ #define WINML_EXPECT_NO_THROW(statement) EXPECT_NO_THROW(statement) #define WINML_EXPECT_TRUE(statement) EXPECT_TRUE(statement) +#define WINML_EXPECT_FALSE(statement) EXPECT_FALSE(statement) #define WINML_EXPECT_EQUAL(val1, val2) EXPECT_EQ(val1, val2) #define WINML_EXPECT_NOT_EQUAL(val1, val2) EXPECT_NE(val1, val2) #define WINML_LOG_ERROR(message) \ ADD_FAILURE() << message - +#define WINML_LOG_COMMENT(message)\ + SCOPED_TRACE(message) #define WINML_EXPECT_HRESULT_SUCCEEDED(hresult_expression) EXPECT_HRESULT_SUCCEEDED(hresult_expression) #define WINML_EXPECT_HRESULT_FAILED(hresult_expression) EXPECT_HRESULT_FAILED(hresult_expression) #define WINML_EXPECT_THROW_SPECIFIC(statement, exception, condition) EXPECT_THROW_SPECIFIC(statement, exception, condition) diff --git a/winml/test/common/taefTestMacros.h b/winml/test/common/taefTestMacros.h index 7c7636b7a4dda..06d9048d7c6ff 100644 --- a/winml/test/common/taefTestMacros.h +++ b/winml/test/common/taefTestMacros.h @@ -30,11 +30,13 @@ using namespace WEX::TestExecution; #define 
WINML_EXPECT_NO_THROW(statement) VERIFY_NO_THROW(statement) #define WINML_EXPECT_TRUE(statement) VERIFY_IS_TRUE(statement) +#define WINML_EXPECT_FALSE(statement) VERIFY_IS_FALSE(statement) #define WINML_EXPECT_EQUAL(val1, val2) VERIFY_ARE_EQUAL(val1, val2) #define WINML_EXPECT_NOT_EQUAL(val1, val2) VERIFY_ARE_NOT_EQUAL(val1, val2) #define WINML_LOG_ERROR(message) \ VERIFY_FAIL(std::wstring_convert>().from_bytes(message).c_str()) - +#define WINML_LOG_COMMENT(message)\ + WEX::Logging::Log::Comment(std::wstring_convert>().from_bytes(message).c_str()) #define WINML_EXPECT_HRESULT_SUCCEEDED(hresult_expression) VERIFY_SUCCEEDED(hresult_expression) #define WINML_EXPECT_THROW_SPECIFIC(statement, exception, condition) VERIFY_THROWS_SPECIFIC(statement, exception, condition) #define WINML_EXPECT_HRESULT_FAILED(hresult_expression) VERIFY_FAILED(hresult_expression) From 37e71ac6cc50b483541bed05470d5fa3a3391be5 Mon Sep 17 00:00:00 2001 From: Ryan Lai Date: Mon, 13 Jan 2020 14:44:52 -0800 Subject: [PATCH 6/6] Move additional gtest specific macro definition into googleTestMacros.h --- winml/test/common/googleTestMacros.h | 15 +++++++++++++++ winml/test/common/std.h | 17 +---------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/winml/test/common/googleTestMacros.h b/winml/test/common/googleTestMacros.h index bf9b583824081..dc38c6402f005 100644 --- a/winml/test/common/googleTestMacros.h +++ b/winml/test/common/googleTestMacros.h @@ -30,6 +30,21 @@ #define GTEST_SKIP GTEST_SKIP_("") #endif +#define EXPECT_THROW_SPECIFIC(statement, exception, condition) \ + EXPECT_THROW( \ + try { \ + statement; \ + } catch (const exception& e) { \ + EXPECT_TRUE(condition(e)); \ + throw; \ + } \ + , exception); + +#ifndef INSTANTIATE_TEST_SUITE_P +// Use the old name, removed in newer versions of googletest +#define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P +#endif + #define WINML_SKIP_TEST(message) \ GTEST_SKIP() << message; diff --git a/winml/test/common/std.h 
b/winml/test/common/std.h index 162915abd02fd..d6eab75f645f8 100644 --- a/winml/test/common/std.h +++ b/winml/test/common/std.h @@ -29,19 +29,4 @@ #include "comp_generated/winrt/windows.ai.machinelearning.h" // WinML -#include "Windows.AI.MachineLearning.Native.h" - -#define EXPECT_THROW_SPECIFIC(statement, exception, condition) \ - EXPECT_THROW( \ - try { \ - statement; \ - } catch (const exception& e) { \ - EXPECT_TRUE(condition(e)); \ - throw; \ - } \ - , exception); - -#ifndef INSTANTIATE_TEST_SUITE_P -// Use the old name, removed in newer versions of googletest -#define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P -#endif \ No newline at end of file +#include "Windows.AI.MachineLearning.Native.h" \ No newline at end of file