From 369069ebeea634f1ef4df2022ab3a5c7119fe2ea Mon Sep 17 00:00:00 2001
From: Hugo Latendresse
Date: Sun, 30 Mar 2025 11:58:50 -0400
Subject: [PATCH 1/2] cherry pick from arange branch

---
 .../torch/exported_program_translator.py |  5 +++-
 .../relax/test_from_exported_to_cuda.py  | 26 +++++++++++++++++++
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py b/python/tvm/relax/frontend/torch/exported_program_translator.py
index 97ccc6393cbb..70c236c33a3e 100644
--- a/python/tvm/relax/frontend/torch/exported_program_translator.py
+++ b/python/tvm/relax/frontend/torch/exported_program_translator.py
@@ -390,9 +390,11 @@ def create_convert_map(
             "reshape.default": self._reshape,
             # tensor creation
             "_to_copy.default": self._to_copy,
+            "arange.default": self._arange,
+            "arange.start": self._arange,
+            "arange.start_step": self._arange,
             "detach.default": self._detach,
             "detach_.default": self._detach,
-            "arange.start": self._arange,
             "contiguous.default": lambda node: self.env[node.args[0]],  # no-op
             "clone.default": lambda node: self.env[node.args[0]],
             "empty.memory_format": self._empty,
@@ -492,6 +494,7 @@ def from_exported_program(
                 assert (
                     func_name in self.convert_map
                 ), f"Unsupported function type {func_name}"
+                print('found function!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', func_name)
                 self.env[node] = self.convert_map[func_name](node)
             else:
                 raise ValueError(f"Unsupported op {node.op}")
diff --git a/tests/python/relax/test_from_exported_to_cuda.py b/tests/python/relax/test_from_exported_to_cuda.py
index 19b8f80a2390..84a656b6ccbe 100644
--- a/tests/python/relax/test_from_exported_to_cuda.py
+++ b/tests/python/relax/test_from_exported_to_cuda.py
@@ -466,6 +466,32 @@ def forward(self, x):
     torch_module = ChunkModel(chunks=chunks, dim=dim).eval()
     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
+@tvm.testing.parametrize_targets("cuda")
+def test_arange_default(target, dev):
+    raw_data = np.random.rand(5).astype("int64")
+
+    class ArangeModel(nn.Module):
+        def forward(self, x):
+            return x + torch.arange(5)
+
+    torch_module = ArangeModel().eval()
+
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+# TODO
+# @tvm.testing.parametrize_targets("cuda")
+# def test_arange_start_step(target, dev):
+#     raw_data = np.random.rand(3).astype("int64")
+
+#     class ArangeModel(nn.Module):
+#         def forward(self, x):
+#             return x + torch.arange(1, 2.5, 0.5)
+
+#     torch_module = ArangeModel().eval()
+
+#     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
 
 @tvm.testing.parametrize_targets("cuda")
 def test_index_select(target, dev):

From 5dd9f3fed61bf4f02a82543c3c8d2ac25b2b7046 Mon Sep 17 00:00:00 2001
From: Hugo Latendresse
Date: Wed, 2 Apr 2025 13:59:32 -0400
Subject: [PATCH 2/2] cleanup + unit test

---
 .../torch/exported_program_translator.py |  1 -
 .../relax/test_from_exported_to_cuda.py  | 35 +++++++++++++--------
 2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py b/python/tvm/relax/frontend/torch/exported_program_translator.py
index 70c236c33a3e..50c29397d7ee 100644
--- a/python/tvm/relax/frontend/torch/exported_program_translator.py
+++ b/python/tvm/relax/frontend/torch/exported_program_translator.py
@@ -494,7 +494,6 @@ def from_exported_program(
                 assert (
                     func_name in self.convert_map
                 ), f"Unsupported function type {func_name}"
-                print('found function!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!', func_name)
                 self.env[node] = self.convert_map[func_name](node)
             else:
                 raise ValueError(f"Unsupported op {node.op}")
diff --git a/tests/python/relax/test_from_exported_to_cuda.py b/tests/python/relax/test_from_exported_to_cuda.py
index 84a656b6ccbe..56ee527caf09 100644
--- a/tests/python/relax/test_from_exported_to_cuda.py
+++ b/tests/python/relax/test_from_exported_to_cuda.py
@@ -466,31 +466,38 @@ def forward(self, x):
     torch_module = ChunkModel(chunks=chunks, dim=dim).eval()
     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
+
 @tvm.testing.parametrize_targets("cuda")
-def test_arange_default(target, dev):
-    raw_data = np.random.rand(5).astype("int64")
+def test_arange(target, dev):
+    # arange.default
+    raw_data = np.array([0, 0, 0, 0, 0])
 
-    class ArangeModel(nn.Module):
+    class ArangeDefaultModel(nn.Module):
         def forward(self, x):
             return x + torch.arange(5)
 
-    torch_module = ArangeModel().eval()
-
+    torch_module = ArangeDefaultModel().eval()
     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
+    # arange.start
+    raw_data = np.array([0, 0, 0])
 
-# TODO
-# @tvm.testing.parametrize_targets("cuda")
-# def test_arange_start_step(target, dev):
-#     raw_data = np.random.rand(3).astype("int64")
+    class ArangeStartModel(nn.Module):
+        def forward(self, x):
+            return x + torch.arange(1, 4)
 
-#     class ArangeModel(nn.Module):
-#         def forward(self, x):
-#             return x + torch.arange(1, 2.5, 0.5)
+    torch_module = ArangeStartModel().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
-#     torch_module = ArangeModel().eval()
+    # arange.start_step
+    raw_data = np.array([0.0, 0.0, 0.0], dtype=np.float32)
 
-#     assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+    class ArangeStartStopModel(nn.Module):
+        def forward(self, x):
+            return x + torch.arange(1, 2.5, 0.5, dtype=torch.float32)
+
+    torch_module = ArangeStartStopModel().eval()
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
 
 
 @tvm.testing.parametrize_targets("cuda")
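
Usage sketch (not part of either patch): a minimal way to exercise the newly registered overloads outside the CUDA test harness. It assumes only the public torch.export API and the from_exported_program entry point defined in the translator file touched by this series; the module name and shapes below are illustrative, not taken from the patches.

import torch
from torch import nn
from torch.export import export

from tvm.relax.frontend.torch import from_exported_program


class ArangeAdd(nn.Module):
    def forward(self, x):
        # torch.arange(start, end, step) should export as the aten arange.start_step
        # overload, which the convert_map entries above route to self._arange.
        return x + torch.arange(1, 2.5, 0.5, dtype=torch.float32)


# Export the module, then translate the resulting ExportedProgram to Relax.
example_input = torch.zeros(3, dtype=torch.float32)
exported = export(ArangeAdd().eval(), (example_input,))
mod = from_exported_program(exported)
mod.show()  # print the translated Relax IRModule for inspection

Routing all three overloads (arange.default, arange.start, arange.start_step) to the single _arange converter matches how the consolidated test_arange in PATCH 2/2 covers them in one test function.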