From e7d6b9735f178e544d9490ab64a6d6854f2b2888 Mon Sep 17 00:00:00 2001
From: Hugo Latendresse
Date: Mon, 28 Apr 2025 10:55:01 -0400
Subject: [PATCH 1/4] move

---
 .../{relax => nightly/test_nnapi}/test_from_exported_to_cuda.py | 1 -
 1 file changed, 1 deletion(-)
 rename tests/python/{relax => nightly/test_nnapi}/test_from_exported_to_cuda.py (99%)

diff --git a/tests/python/relax/test_from_exported_to_cuda.py b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
similarity index 99%
rename from tests/python/relax/test_from_exported_to_cuda.py
rename to tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
index 6bb35b50b1df..8b23e899fc64 100644
--- a/tests/python/relax/test_from_exported_to_cuda.py
+++ b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
@@ -1,4 +1,3 @@
-# Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements. See the NOTICE file
 # distributed with this work for additional information
 # regarding copyright ownership. The ASF licenses this file

From 9b2d9983ec478106ba38680c0ac29466b0046381 Mon Sep 17 00:00:00 2001
From: Hugo Latendresse
Date: Mon, 28 Apr 2025 10:56:07 -0400
Subject: [PATCH 2/4] add new tests

---
 .../test_nnapi/test_from_exported_to_cuda.py  | 311 ++++++++++++++++++
 1 file changed, 311 insertions(+)

diff --git a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
index 8b23e899fc64..f68b5a234341 100644
--- a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
+++ b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
@@ -1,3 +1,4 @@
+# Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements. See the NOTICE file
 # distributed with this work for additional information
 # regarding copyright ownership. The ASF licenses this file
@@ -741,5 +742,315 @@ def forward(self, x):
     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
 
+@tvm.testing.parametrize_targets("cuda")
+def test_leakyrelu_module(target, dev):
+    class LeakyReLUModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.act = nn.LeakyReLU(negative_slope=0.1)
+
+        def forward(self, x):
+            return self.act(x)
+
+    raw_data = np.random.randn(2, 3).astype(np.float32)
+    torch_module = LeakyReLUModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_log_softmax_module(target, dev):
+    class LogSoftmaxModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.logsoftmax = nn.LogSoftmax(dim=1)
+
+        def forward(self, x):
+            return self.logsoftmax(x)
+
+    raw_data = np.random.randn(4, 5).astype(np.float32)
+    torch_module = LogSoftmaxModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_softmax_module(target, dev):
+    class SoftmaxModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.softmax = nn.Softmax(dim=1)
+
+        def forward(self, x):
+            return self.softmax(x)
+
+    raw_data = np.random.randn(4, 5).astype(np.float32)
+    torch_module = SoftmaxModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_adaptive_avg_pool2d_module(target, dev):
+    class AdaptiveAvgPool2dModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.pool = nn.AdaptiveAvgPool2d((1, 1))
+
+        def forward(self, x):
+            return self.pool(x)
+
+    raw_data = np.random.randn(2, 3, 8, 8).astype(np.float32)
+    torch_module = AdaptiveAvgPool2dModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_avg_pool2d_module(target, dev):
+    class AvgPool2dModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.pool = nn.AvgPool2d(kernel_size=2)
+
+        def forward(self, x):
+            return self.pool(x)
+
+    raw_data = np.random.randn(2, 3, 8, 8).astype(np.float32)
+    torch_module = AvgPool2dModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_conv1d_module(target, dev):
+    class Conv1dModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.conv = nn.Conv1d(in_channels=3, out_channels=4, kernel_size=3)
+
+        def forward(self, x):
+            return self.conv(x)
+
+    raw_data = np.random.randn(2, 3, 10).astype(np.float32)
+    torch_module = Conv1dModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_conv2d_module(target, dev):
+    class Conv2dModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.conv = nn.Conv2d(in_channels=3, out_channels=4, kernel_size=3)
+
+        def forward(self, x):
+            return self.conv(x)
+
+    raw_data = np.random.randn(2, 3, 10, 10).astype(np.float32)
+    torch_module = Conv2dModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_conv3d_module(target, dev):
+    class Conv3dModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.conv = nn.Conv3d(in_channels=2, out_channels=3, kernel_size=3)
+
+        def forward(self, x):
+            return self.conv(x)
+
+    raw_data = np.random.randn(1, 2, 8, 8, 8).astype(np.float32)
+    torch_module = Conv3dModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_group_norm_module(target, dev):
+    class GroupNormModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.gn = nn.GroupNorm(num_groups=1, num_channels=4)
+
+        def forward(self, x):
+            return self.gn(x)
+
+    raw_data = np.random.randn(2, 4, 8, 8).astype(np.float32)
+    torch_module = GroupNormModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_layer_norm_module(target, dev):
+    class LayerNormModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.ln = nn.LayerNorm(normalized_shape=8)
+
+        def forward(self, x):
+            return self.ln(x)
+
+    raw_data = np.random.randn(2, 4, 8).astype(np.float32)
+    torch_module = LayerNormModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_linear_module(target, dev):
+    class LinearModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.linear = nn.Linear(10, 5)
+
+        def forward(self, x):
+            return self.linear(x)
+
+    raw_data = np.random.randn(4, 10).astype(np.float32)
+    torch_module = LinearModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_max_pool2d_module(target, dev):
+    class MaxPool2dModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.pool = nn.MaxPool2d(kernel_size=2)
+
+        def forward(self, x):
+            return self.pool(x)
+
+    raw_data = np.random.randn(2, 3, 8, 8).astype(np.float32)
+    torch_module = MaxPool2dModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_embedding_module(target, dev):
+    class EmbeddingModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.embed = nn.Embedding(num_embeddings=10, embedding_dim=3)
+
+        def forward(self, x):
+            return self.embed(x)
+
+    raw_data = np.random.randint(0, 10, (2, 4)).astype(np.int64)
+    torch_module = EmbeddingModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_flatten_module(target, dev):
+    class FlattenModule(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.flatten = nn.Flatten()
+
+        def forward(self, x):
+            return self.flatten(x)
+
+    raw_data = np.random.randn(2, 3, 4, 5).astype(np.float32)
+    torch_module = FlattenModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_numel(target, dev):
+    class NumelModule(nn.Module):
+        def forward(self, x):
+            return torch.tensor(x.numel())
+
+    raw_data = np.random.randn(2, 3, 4).astype(np.float32)
+    torch_module = NumelModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_size(target, dev):
+    class SizeModule(nn.Module):
+        def forward(self, x):
+            return torch.tensor(x.size(0))
+
+    raw_data = np.random.randn(5, 4).astype(np.float32)
+    torch_module = SizeModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_tensor(target, dev):
+    class TensorModule(nn.Module):
+        def forward(self, x):
+            return torch.tensor([1, 2, 3])
+
+    raw_data = np.zeros((1,)).astype(np.float32)
+    torch_module = TensorModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_type(target, dev):
+    class TypeModule(nn.Module):
+        def forward(self, x):
+            return x.type(torch.float16)
+
+    raw_data = np.random.randn(2, 3).astype(np.float32)
+    torch_module = TypeModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_float(target, dev):
+    class FloatModule(nn.Module):
+        def forward(self, x):
+            return x.float()
+
+    raw_data = np.random.randn(2, 3).astype(np.float32)
+    torch_module = FloatModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_half(target, dev):
+    class HalfModule(nn.Module):
+        def forward(self, x):
+            return x.half()
+
+    raw_data = np.random.randn(2, 3).astype(np.float32)
+    torch_module = HalfModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_getattr(target, dev):
+    class GetAttrModule(nn.Module):
+        def forward(self, x):
+            # Use getattr to call the ndimension method.
+            return torch.tensor(getattr(x, "ndimension")())
+
+    raw_data = np.random.randn(2, 3, 4).astype(np.float32)
+    torch_module = GetAttrModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_sym_size_int(target, dev):
+    class SymSizeIntModule(nn.Module):
+        def forward(self, x):
+            return torch.tensor(x.shape[1])
+
+    raw_data = np.random.randn(2, 3, 4).astype(np.float32)
+    torch_module = SymSizeIntModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_interpolate(target, dev):
+    class InterpolateModule(nn.Module):
+        def forward(self, x):
+            # Upsample to a fixed size.
+            return F.interpolate(x, size=(16, 16), mode="nearest")
+
+    raw_data = np.random.randn(2, 3, 8, 8).astype(np.float32)
+    torch_module = InterpolateModule().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
 if __name__ == "__main__":
     tvm.testing.main()

From 8897b2861612c0a641adc128a6ceea6d08ef2f01 Mon Sep 17 00:00:00 2001
From: Hugo Latendresse
Date: Mon, 28 Apr 2025 11:01:36 -0400
Subject: [PATCH 3/4] add new tests from 17862

---
 tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
index f68b5a234341..500e52151537 100644
--- a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
+++ b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
@@ -21,11 +21,13 @@
 import numpy as np
 import torch
 from torch import nn
+from torch.nn import functional as F
 from torch.export import export
 from tvm.relax.frontend.torch import from_exported_program
 from torch.nn import Softmax, Upsample
 
 
+
 def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev):
     """
     This util ensures that a torch module can successfully be exported to TVM

From 2a9615370c63b07951dbf3a5675f08925262ccc6 Mon Sep 17 00:00:00 2001
From: Hugo Latendresse
Date: Mon, 28 Apr 2025 11:06:31 -0400
Subject: [PATCH 4/4] whitespace

---
 tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
index 500e52151537..0184688a9e84 100644
--- a/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
+++ b/tests/python/nightly/test_nnapi/test_from_exported_to_cuda.py
@@ -27,7 +27,6 @@
 from torch.nn import Softmax, Upsample
 
 
-
 def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev):
     """
     This util ensures that a torch module can successfully be exported to TVM
@@ -1054,5 +1053,6 @@ def forward(self, x):
     torch_module = InterpolateModule().eval()
     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
+
 if __name__ == "__main__":
     tvm.testing.main()