diff --git a/src/runtime/contrib/libtorch/libtorch_runtime.cc b/src/runtime/contrib/libtorch/libtorch_runtime.cc
index e76d04389ec7..e57ec426546b 100644
--- a/src/runtime/contrib/libtorch/libtorch_runtime.cc
+++ b/src/runtime/contrib/libtorch/libtorch_runtime.cc
@@ -24,16 +24,15 @@
 
 // we do not want clang to reorder our includes
 // clang-format off
-#include
-#include
-#include
-#include
-
 #include
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 // clang-format on
 
 #include
 
diff --git a/tests/python/contrib/test_libtorch_ops.py b/tests/python/contrib/test_libtorch_ops.py
index 28ae39c329f5..501f27f6060b 100644
--- a/tests/python/contrib/test_libtorch_ops.py
+++ b/tests/python/contrib/test_libtorch_ops.py
@@ -15,6 +15,8 @@
 # specific language governing permissions and limitations
 # under the License.
 
+import tvm.testing
+
 import pytest
 
 import tvm.relay
@@ -29,6 +31,7 @@
     import_torch_error = str(e)
 
 
+@tvm.testing.requires_gpu
 @pytest.mark.skipif(torch is None, reason=f"PyTorch is not available: {import_torch_error}")
 def test_backend():
     @torch.jit.script
diff --git a/tests/scripts/task_config_build_gpu.sh b/tests/scripts/task_config_build_gpu.sh
index 0145eb387bf4..6f37054264c4 100755
--- a/tests/scripts/task_config_build_gpu.sh
+++ b/tests/scripts/task_config_build_gpu.sh
@@ -48,3 +48,4 @@ echo set\(USE_LIBBACKTRACE AUTO\) >> config.cmake
 echo set\(USE_CCACHE OFF\) >> config.cmake
 echo set\(SUMMARIZE ON\) >> config.cmake
 echo set\(HIDE_PRIVATE_SYMBOLS ON\) >> config.cmake
+echo set\(USE_LIBTORCH /usr/local/libtorch\) >> config.cmake
diff --git a/tests/scripts/task_python_integration.sh b/tests/scripts/task_python_integration.sh
index d831481acea6..f0330582ff09 100755
--- a/tests/scripts/task_python_integration.sh
+++ b/tests/scripts/task_python_integration.sh
@@ -64,6 +64,9 @@ run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-integration tests/python/integration
 run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-contrib tests/python/contrib --ignore=tests/python/contrib/test_ethosu
 run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-contrib-test_ethosu tests/python/contrib/test_ethosu -n auto
 
+# we want this to run in particular in integration-gpu because there we have PyTorch
+run_pytest cython libtorch tests/python/contrib/test_libtorch_ops.py
+
 # forked is needed because the global registry gets contaminated
 TVM_TEST_TARGETS="${TVM_RELAY_TEST_TARGETS:-llvm;cuda}" \
 run_pytest ctypes ${TVM_INTEGRATION_TESTSUITE_NAME}-relay tests/python/relay