From 85a4e924e8477fb8cc2e70c3b4b9abca808b16fb Mon Sep 17 00:00:00 2001 From: deivanayakisankaralingam Date: Tue, 6 May 2025 07:16:09 +0000 Subject: [PATCH 1/4] add avg pool 1d and 3d op mappings and added test script --- .../torch/base_fx_graph_translator.py | 93 +++- .../torch/exported_program_translator.py | 2 + .../tvm/relax/frontend/torch/fx_translator.py | 25 + python/tvm/relax/op/nn/nn.py | 6 +- .../test_frontend_from_exported_program.py | 186 +++++++ tests/python/relax/test_frontend_from_fx.py | 183 +++++++ tests/python/relax/test_op_nn_pooling.py | 455 ++++++++++++++++++ 7 files changed, 946 insertions(+), 4 deletions(-) diff --git a/python/tvm/relax/frontend/torch/base_fx_graph_translator.py b/python/tvm/relax/frontend/torch/base_fx_graph_translator.py index 5ae05ab89160..34694bf5e32d 100644 --- a/python/tvm/relax/frontend/torch/base_fx_graph_translator.py +++ b/python/tvm/relax/frontend/torch/base_fx_graph_translator.py @@ -580,6 +580,48 @@ def _addmm(self, node: fx.Node) -> relax.Var: res = bias if res is None else self.block_builder.emit(relax.op.add(bias, res)) return res + def _avg_pool1d_impl( + self, + x: relax.Expr, + kernel_size: Union[int, Tuple[int]] = 1, + stride: Optional[Union[int, Tuple[int]]] = None, + padding: Optional[int] = 0, + ceil_mode: Optional[bool] = False, + count_include_pad: Optional[bool] = True, + ) -> relax.Var: + + x_ndim = x.struct_info.ndim + if x_ndim == 2: + x = relax.op.expand_dims(x, axis=0) + stride = kernel_size if stride is None or stride == [] else stride + + result = self.block_builder.emit( + relax.op.nn.avg_pool1d( + x, + pool_size=kernel_size, + strides=stride, + padding=padding, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + layout="NCW", + ) + ) + + if x_ndim == 2: + result = relax.op.squeeze(result, axis=[0]) + return result + + def _avg_pool1d(self, node: fx.Node) -> relax.Var: + args, kwargs = node.normalized_arguments(node) + x = self.env[args[0]] + kernel_size = args[1] if len(args) > 1 else kwargs["kernel_size"] + stride = args[2] if len(args) > 2 else kwargs.get("stride", None) + padding = args[3] if len(args) > 3 else kwargs.get("padding", 0) + ceil_mode = args[4] if len(args) > 4 else kwargs.get("ceil_mode", False) + count_include_pad = args[5] if len(args) > 5 else kwargs.get("count_include_pad", True) + + return self._avg_pool1d_impl(x, kernel_size, stride, padding, ceil_mode, count_include_pad) + def _avg_pool2d_impl( self, x: relax.Expr, @@ -588,8 +630,12 @@ def _avg_pool2d_impl( padding: Optional[int] = 0, ceil_mode: Optional[bool] = False, ) -> relax.Var: + x_ndim = x.struct_info.ndim + if x_ndim == 3: + x = relax.op.expand_dims(x, axis=0) stride = kernel_size if stride is None or stride == [] else stride - return self.block_builder.emit( + + result = self.block_builder.emit( relax.op.nn.avg_pool2d( x, pool_size=kernel_size, @@ -600,6 +646,10 @@ def _avg_pool2d_impl( ) ) + if x_ndim == 3: + result = relax.op.squeeze(result, axis=[0]) + return result + def _avg_pool2d(self, node: fx.Node) -> relax.Var: args, kwargs = node.normalized_arguments(node) x = self.env[args[0]] @@ -609,6 +659,47 @@ def _avg_pool2d(self, node: fx.Node) -> relax.Var: ceil_mode = args[4] if len(args) > 4 else kwargs.get("ceil_mode", False) return self._avg_pool2d_impl(x, kernel_size, stride, padding, ceil_mode) + def _avg_pool3d_impl( + self, + x: relax.Expr, + kernel_size: Union[int, Tuple[int, int, int]] = (1, 1, 1), + stride: Optional[Union[int, Tuple[int, int, int]]] = None, + padding: Optional[int] = 0, + ceil_mode: 
Optional[bool] = False, + count_include_pad: Optional[bool] = True, + ) -> relax.Var: + x_ndim = x.struct_info.ndim + if x_ndim == 4: + x = relax.op.expand_dims(x, axis=0) + stride = kernel_size if stride is None or stride == [] else stride + + result = self.block_builder.emit( + relax.op.nn.avg_pool3d( + x, + pool_size=kernel_size, + strides=stride, + padding=padding, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + layout="NCDHW", + ) + ) + + if x_ndim == 4: + result = relax.op.squeeze(result, axis=[0]) + return result + + def _avg_pool3d(self, node: fx.Node) -> relax.Var: + args, kwargs = node.normalized_arguments(node) + x = self.env[args[0]] + kernel_size = args[1] if len(args) > 1 else kwargs["kernel_size"] + stride = args[2] if len(args) > 2 else kwargs.get("stride", None) + padding = args[3] if len(args) > 3 else kwargs.get("padding", 0) + ceil_mode = args[4] if len(args) > 4 else kwargs.get("ceil_mode", False) + count_include_pad = args[5] if len(args) > 5 else kwargs.get("count_include_pad", True) + + return self._avg_pool3d_impl(x, kernel_size, stride, padding, ceil_mode, count_include_pad) + def _baddbmm(self, node: fx.Node) -> relax.Var: x = self.env[node.args[0]] batch1 = self.env[node.args[1]] diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py b/python/tvm/relax/frontend/torch/exported_program_translator.py index dbe37b886017..fc37fd3fb9a6 100644 --- a/python/tvm/relax/frontend/torch/exported_program_translator.py +++ b/python/tvm/relax/frontend/torch/exported_program_translator.py @@ -401,7 +401,9 @@ def create_convert_map( "adaptive_avg_pool2d.default": self._adaptive_avg_pool2d, "adaptive_avg_pool3d.default": self._adaptive_avg_pool3d, "addmm.default": self._addmm, + "avg_pool1d.default": self._avg_pool1d, "avg_pool2d.default": self._avg_pool2d, + "avg_pool3d.default": self._avg_pool3d, "baddbmm.default": self._baddbmm, "bmm.default": self._binary_op( partial(relax.op.linear_algebra.matmul, out_dtype="float32"), operator.matmul diff --git a/python/tvm/relax/frontend/torch/fx_translator.py b/python/tvm/relax/frontend/torch/fx_translator.py index 8081a98f59ca..cb3816879b24 100644 --- a/python/tvm/relax/frontend/torch/fx_translator.py +++ b/python/tvm/relax/frontend/torch/fx_translator.py @@ -230,6 +230,15 @@ def _adaptive_avg_pool3d_module(self, node: fx.Node) -> relax.Var: result = relax.op.squeeze(result, axis=[0]) return result + def _avg_pool1d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + kernel_size = module.kernel_size + stride = module.stride + padding = module.padding + ceil_mode = module.ceil_mode + return self._avg_pool1d_impl(x, kernel_size, stride, padding, ceil_mode) + def _avg_pool2d_module(self, node: fx.Node) -> relax.Var: x = self.env[node.args[0]] module = self.named_modules[node.target] @@ -239,6 +248,15 @@ def _avg_pool2d_module(self, node: fx.Node) -> relax.Var: ceil_mode = module.ceil_mode return self._avg_pool2d_impl(x, kernel_size, stride, padding, ceil_mode) + def _avg_pool3d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + kernel_size = module.kernel_size + stride = module.stride + padding = module.padding + ceil_mode = module.ceil_mode + return self._avg_pool3d_impl(x, kernel_size, stride, padding, ceil_mode) + def _batch_norm_2d_module(self, node: fx.Node) -> relax.Var: x = self.env[node.args[0]] module = self.named_modules[node.target] @@ -710,8 +728,13 @@ def 
create_convert_map( # neural network nn.AdaptiveAvgPool1d: self._adaptive_avg_pool1d_module, nn.AdaptiveAvgPool2d: self._adaptive_avg_pool2d_module, +<<<<<<< HEAD nn.AdaptiveAvgPool3d: self._adaptive_avg_pool3d_module, +======= + nn.AvgPool1d: self._avg_pool1d_module, +>>>>>>> 88cf23de1 (add avg pool 1d and 3d op mappings and added test script) nn.AvgPool2d: self._avg_pool2d_module, + nn.AvgPool3d: self._avg_pool3d_module, nn.BatchNorm2d: self._batch_norm_2d_module, nn.Conv1d: self._conv1d_module, nn.Conv2d: self._conv2d_module, @@ -824,7 +847,9 @@ def create_convert_map( "adaptive_avg_pool2d": self._adaptive_avg_pool2d, "adaptive_avg_pool3d": self._adaptive_avg_pool3d, "addmm": self._addmm, + "avg_pool1d": self._avg_pool1d, "avg_pool2d": self._avg_pool2d, + "avg_pool3d": self._avg_pool3d, "baddbmm": self._baddbmm, "bmm": self._binary_op( partial(relax.op.linear_algebra.matmul, out_dtype="float32"), operator.matmul diff --git a/python/tvm/relax/op/nn/nn.py b/python/tvm/relax/op/nn/nn.py index e234e8ad7b18..44da895eaca5 100644 --- a/python/tvm/relax/op/nn/nn.py +++ b/python/tvm/relax/op/nn/nn.py @@ -840,7 +840,7 @@ def avg_pool1d( padding: Union[int, Tuple[int, ...]] = (0, 0), dilation: Union[int, Tuple[int, int]] = (1,), ceil_mode: bool = False, - count_include_pad: bool = False, + count_include_pad: bool = True, layout: str = "NCW", out_layout: Optional[str] = None, ) -> Expr: @@ -920,7 +920,7 @@ def avg_pool2d( padding: Union[int, Tuple[int, ...]] = (0, 0), dilation: Union[int, Tuple[int, int]] = (1, 1), ceil_mode: bool = False, - count_include_pad: bool = False, + count_include_pad: bool = True, layout: str = "NCHW", out_layout: Optional[str] = None, ) -> Expr: @@ -1008,7 +1008,7 @@ def avg_pool3d( padding: Union[int, Tuple[int, ...]] = (0, 0, 0), dilation: Union[int, Tuple[int, int]] = (1, 1, 1), ceil_mode: bool = False, - count_include_pad: bool = False, + count_include_pad: bool = True, layout: str = "NCDHW", out_layout: Optional[str] = None, ) -> Expr: diff --git a/tests/python/relax/test_frontend_from_exported_program.py b/tests/python/relax/test_frontend_from_exported_program.py index ef198d2f83f3..b29b5d44b46e 100644 --- a/tests/python/relax/test_frontend_from_exported_program.py +++ b/tests/python/relax/test_frontend_from_exported_program.py @@ -1367,6 +1367,100 @@ def main( verify_model(Addmm2(), example_args, {}, expected2) +def test_avg_pool1d(): + class AvgPool1d1(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool1d(kernel_size=1) + + def forward(self, input): + return self.pool(input) + + @tvm.script.ir_module + class expected1: + @R.function + def main(input_1: R.Tensor((1, 3, 10), dtype="float32")) -> R.Tuple(R.Tensor((1, 3, 10), dtype="float32")): + with R.dataflow(): + lv: R.Tensor((1, 3, 10), dtype="float32") = R.nn.avg_pool1d( + input_1, + pool_size=[1], + strides=[1], + dilation=[1], + padding=[0, 0], ceil_mode=False, count_include_pad=True, + layout="NCW", + out_layout="NCW", + ) + gv: R.Tuple(R.Tensor((1, 3, 10), dtype="float32")) = (lv,) + R.output(gv) + return gv + + class AvgPool1d2(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool1d(kernel_size=3, stride=2, padding=1, ceil_mode=True) + + def forward(self, input): + return self.pool(input) + + class AvgPool1d3(Module): + def forward(self, input): + return torch.nn.functional.avg_pool1d( + input, kernel_size=3, stride=2, padding=1, ceil_mode=True + ) + + @tvm.script.ir_module + class expected2: + @R.function + def main(input_1: R.Tensor((1, 3, 10), 
dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool1d( + input_1, + pool_size=[3], + strides=[2], + dilation=[1], + padding=[1, 1], + ceil_mode=True, + count_include_pad=True, + layout="NCW", + out_layout="NCW", + ) + gv = (lv,) + R.output(gv) + return gv + + class AvgPool1d4(Module): + def forward(self, input): + return torch.nn.functional.avg_pool1d( + input, kernel_size=2, stride=2, padding=0 + ) + + @tvm.script.ir_module + class expected3: + @R.function + def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool1d( + input_1, + pool_size=[2], + strides=[2], + dilation=[1], + padding=[0, 0], + ceil_mode=False, + count_include_pad=True, + layout="NCW", + out_layout="NCW", + ) + gv = (lv,) + R.output(gv) + return gv + + example_args = (torch.randn(1, 3, 10, dtype=torch.float32),) + verify_model(AvgPool1d1(), example_args, {}, expected1) + verify_model(AvgPool1d2(), example_args, {}, expected2) + verify_model(AvgPool1d3(), example_args, {}, expected2) + verify_model(AvgPool1d4(), example_args, {}, expected3) + + def test_avg_pool2d(): class AvgPool2d1(Module): def __init__(self): @@ -1460,6 +1554,98 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): verify_model(AvgPool2d4(), example_args, {}, expected3) +def test_avg_pool3d(): + class AvgPool3d1(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool3d(kernel_size=1) + + def forward(self, input): + return self.pool(input) + + @tvm.script.ir_module + class expected1: + @R.function + def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")) -> R.Tuple(R.Tensor((1, 3, 8, 8, 8), dtype="float32")): + with R.dataflow(): + lv: R.Tensor((1, 3, 8, 8, 8), dtype="float32") = R.nn.avg_pool3d( + input_1, + pool_size=[1, 1, 1], + strides=[1, 1, 1], + dilation=[1, 1, 1], + padding=[0, 0, 0, 0, 0, 0],ceil_mode=False, count_include_pad=True, + layout="NCDHW", + out_layout="NCDHW", + ) + gv: R.Tuple(R.Tensor((1, 3, 8, 8, 8), dtype="float32")) = (lv,) + R.output(gv) + return gv + + class AvgPool3d2(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool3d(kernel_size=3, stride=2, padding=1, ceil_mode=True) + + def forward(self, input): + return self.pool(input) + + class AvgPool3d3(Module): + def forward(self, input): + return torch.nn.functional.avg_pool3d( + input, kernel_size=3, stride=2, padding=1, ceil_mode=True + ) + + @tvm.script.ir_module + class expected2: + @R.function + def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool3d( + input_1, + pool_size=[3, 3, 3], + strides=[2, 2, 2], + dilation=[1, 1, 1], + padding=[1, 1, 1, 1, 1, 1], + ceil_mode=True,count_include_pad=True, + layout="NCDHW", + out_layout="NCDHW", + ) + gv = (lv,) + R.output(gv) + return gv + + class AvgPool3d4(Module): + def forward(self, input): + return torch.nn.functional.avg_pool3d( + input, kernel_size=[2, 1, 2], stride=[2, 1, 2], divisor_override=4 + ) + + @tvm.script.ir_module + class expected3: + @R.function + def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool3d( + input_1, + pool_size=[2, 1, 2], + strides=[2, 1, 2], + dilation=[1, 1, 1], + padding=[0, 0, 0, 0, 0, 0], + ceil_mode=False,count_include_pad=True, + layout="NCDHW", + out_layout="NCDHW", + ) + gv = (lv,) + R.output(gv) + return gv + + example_args = (torch.randn(1, 3, 8, 8, 8, dtype=torch.float32),) + verify_model(AvgPool3d1(), example_args, {}, expected1) + verify_model(AvgPool3d2(), 
example_args, {}, expected2) + verify_model(AvgPool3d3(), example_args, {}, expected2) + verify_model(AvgPool3d4(), example_args, {}, expected3) + + def test_baddbmm(): class BAddBMM1(Module): def __init__(self): diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 681474244ae8..27e4c55d3fc9 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1187,6 +1187,7 @@ def main( verify_model(MaxPool2d3(), input_info, {}, expected3) +<<<<<<< HEAD def test_maxpool3d(): input_info = [([1, 3, 10, 10, 10], "float32")] @@ -1194,10 +1195,20 @@ class MaxPool3d(Module): def __init__(self): super().__init__() self.pool = torch.nn.MaxPool3d(kernel_size=[1, 1, 1]) +======= +def test_avgpool1d(): + input_info = [([1, 3, 10], "float32")] + + class AvgPool1d(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool1d(kernel_size=1) +>>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) def forward(self, input): return self.pool(input) +<<<<<<< HEAD class MaxPool3d_functional(Module): def __init__(self): super().__init__() @@ -1229,10 +1240,35 @@ class MaxPool3d2(Module): def __init__(self): super().__init__() self.pool = torch.nn.MaxPool3d(kernel_size=[2, 2, 2], dilation=[1, 2, 2]) +======= + @tvm.script.ir_module + class expected1: + @R.function + def main(input_1: R.Tensor((1, 3, 10), dtype="float32")) -> R.Tensor((1, 3, 10), dtype="float32"): + with R.dataflow(): + lv = R.nn.avg_pool1d( + input_1, + pool_size=[1], + strides=[1], + dilation=[1], + padding=[0, 0], ceil_mode=False, count_include_pad=True, + layout="NCW", + out_layout="NCW", + ) + gv = lv + R.output(gv) + return gv + + class AvgPool1d2(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool1d(kernel_size=4, stride=2, padding=2, ceil_mode=True) +>>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) def forward(self, input): return self.pool(input) +<<<<<<< HEAD @tvm.script.ir_module class expected2: @R.function @@ -1260,10 +1296,41 @@ def __init__(self): def forward(self, input): return self.pool(input) +======= + class AvgPool1d3(Module): + def forward(self, input): + return torch.nn.functional.avg_pool1d( + input, kernel_size=4, stride=2, padding=2, ceil_mode=True + ) + + @tvm.script.ir_module + class expected2: + @R.function + def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool1d( + input_1, + pool_size=[4], + strides=[2], + dilation=[1], + padding=[2, 2], + ceil_mode=True,count_include_pad=True, + layout="NCW", + out_layout="NCW", + ) + gv = lv + R.output(gv) + return gv + + class AvgPool1d4(Module): + def forward(self, input): + return torch.nn.functional.avg_pool1d(input, kernel_size=2) +>>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) @tvm.script.ir_module class expected3: @R.function +<<<<<<< HEAD def main( input_1: R.Tensor((1, 3, 10, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 5, 5, 5), dtype="float32"): @@ -1285,6 +1352,28 @@ def main( verify_model(MaxPool3d_functional(), input_info, {}, expected1) verify_model(MaxPool3d2(), input_info, {}, expected2) verify_model(MaxPool3d3(), input_info, {}, expected3) +======= + def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool1d( + input_1, + pool_size=[2], + strides=[2], + dilation=[1], + padding=[0, 0], + ceil_mode=False, count_include_pad=True, + layout="NCW", + 
out_layout="NCW", + ) + gv = lv + R.output(gv) + return gv + + verify_model(AvgPool1d(), input_info, {}, expected1) + verify_model(AvgPool1d2(), input_info, {}, expected2) + verify_model(AvgPool1d3(), input_info, {}, expected2) + verify_model(AvgPool1d4(), input_info, {}, expected3) +>>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) def test_avgpool2d(): @@ -1381,6 +1470,7 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): verify_model(AvgPool2d4(), input_info, {}, expected3) +<<<<<<< HEAD def test_adaptive_avgpool1d(): input_info = [([1, 3, 16], "float32")] @@ -1388,10 +1478,20 @@ class AdaptiveAvgPool1d0(torch.nn.Module): def __init__(self): super().__init__() self.pool = torch.nn.AdaptiveAvgPool1d(8) +======= +def test_avgpool3d(): + input_info = [([1, 3, 8, 8, 8], "float32")] + + class AvgPool3d(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool3d(kernel_size=[1, 1, 1]) +>>>>>>> 88cf23de1 (add avg pool 1d and 3d op mappings and added test script) def forward(self, input): return self.pool(input) +<<<<<<< HEAD class AdaptiveAvgPool1d1(torch.nn.Module): def forward(self, input): return torch.nn.functional.adaptive_avg_pool1d(input, 8) @@ -1412,6 +1512,89 @@ def main( verify_model(AdaptiveAvgPool1d0(), input_info, {}, expected1) verify_model(AdaptiveAvgPool1d1(), input_info, {}, expected1) +======= + @tvm.script.ir_module + class expected1: + @R.function + def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")) -> R.Tensor((1, 3, 8, 8, 8), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 8, 8, 8), dtype="float32") = R.nn.avg_pool3d( + input_1, + pool_size=[1, 1, 1], + strides=[1, 1, 1], + dilation=[1, 1, 1], + padding=[0, 0, 0, 0, 0, 0], + ceil_mode=False, count_include_pad=True, + layout="NCDHW", + out_layout="NCDHW", + ) + gv: R.Tensor((1, 3, 8, 8, 8), dtype="float32") = lv + R.output(gv) + return gv + + class AvgPool3d2(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool3d(kernel_size=[3, 3, 3], stride=2, padding=1, ceil_mode=True) + + def forward(self, input): + return self.pool(input) + + class AvgPool3d3(Module): + def forward(self, input): + return torch.nn.functional.avg_pool3d( + input, kernel_size=[3, 3, 3], stride=2, padding=1, ceil_mode=True + ) + + @tvm.script.ir_module + class expected2: + @R.function + def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool3d( + input_1, + pool_size=[3, 3, 3], + strides=[2, 2, 2], + dilation=[1, 1, 1], + padding=[1, 1, 1, 1, 1, 1], + ceil_mode=True, + count_include_pad=True, + layout="NCDHW", + out_layout="NCDHW", + ) + gv = lv + R.output(gv) + return gv + + class AvgPool3d4(Module): + def forward(self, input): + return torch.nn.functional.avg_pool3d(input, kernel_size=[2, 1, 2], stride=[2, 1, 2], divisor_override=4) + + @tvm.script.ir_module + class expected3: + @R.function + def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): + with R.dataflow(): + lv = R.nn.avg_pool3d( + input_1, + pool_size=[2, 1, 2], + strides=[2, 1, 2], + dilation=[1, 1, 1], + padding=[0, 0, 0, 0, 0, 0], + ceil_mode=False, + count_include_pad=True, + layout="NCDHW", + out_layout="NCDHW", + ) + gv = lv + R.output(gv) + return gv + + verify_model(AvgPool3d(), input_info, {}, expected1) + verify_model(AvgPool3d2(), input_info, {}, expected2) + verify_model(AvgPool3d3(), input_info, {}, expected2) + verify_model(AvgPool3d4(), input_info, {}, expected3) +>>>>>>> 88cf23de1 (add avg pool 1d and 
3d op mappings and added test script) def test_adaptive_avgpool2d(): diff --git a/tests/python/relax/test_op_nn_pooling.py b/tests/python/relax/test_op_nn_pooling.py index 846338a93781..107be0c7af0e 100644 --- a/tests/python/relax/test_op_nn_pooling.py +++ b/tests/python/relax/test_op_nn_pooling.py @@ -713,6 +713,214 @@ def test_max_pool3d_infer_struct_info_wrong_input_type(): bb.normalize(relax.op.nn.max_pool3d(x1)) +def test_avg_pool1d_infer_struct_info(): + bb = relax.BlockBuilder() + vdev0 = VDevice("llvm") + x0 = relax.Var("x", R.Tensor((2, 3, 32), "float32")) + x1 = relax.Var("x", R.Tensor((2, 32, 3), "float32")) + x2 = relax.Var("x", R.Tensor("float32", ndim=3)) + x3 = relax.Var("x", R.Tensor("float32")) + x4 = relax.Var("x", R.Tensor(ndim=3)) + x5 = relax.Var("x", R.Tensor()) + x6 = relax.Var("x", R.Tensor((2, 4, 32, 16), "float32")) + x7 = relax.Var("x", R.Tensor((2, 3, 32), "float32", vdev0)) + + _check_inference(bb, relax.op.nn.avg_pool1d(x0), relax.TensorStructInfo((2, 3, 32), "float32")) + _check_inference( + bb, relax.op.nn.avg_pool1d(x7), relax.TensorStructInfo((2, 3, 32), "float32", vdev0) + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, pool_size=3), + relax.TensorStructInfo((2, 3, 30), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, padding=1), + relax.TensorStructInfo((2, 3, 34), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, padding=[1, 2]), + relax.TensorStructInfo((2, 3, 35), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, strides=2), + relax.TensorStructInfo((2, 3, 16), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, dilation=2), + relax.TensorStructInfo((2, 3, 32), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x1, layout="NWC"), + relax.TensorStructInfo((2, 32, 3), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, out_layout="NWC"), + relax.TensorStructInfo((2, 32, 3), "float32"), + ) + _check_inference( + bb, relax.op.nn.avg_pool1d(x2), relax.TensorStructInfo(dtype="float32", ndim=3) + ) + _check_inference( + bb, relax.op.nn.avg_pool1d(x3), relax.TensorStructInfo(dtype="float32", ndim=3) + ) + _check_inference(bb, relax.op.nn.avg_pool1d(x4), relax.TensorStructInfo(dtype="", ndim=3)) + _check_inference(bb, relax.op.nn.avg_pool1d(x5), relax.TensorStructInfo(dtype="", ndim=3)) + + +def test_avg_pool1d_infer_struct_info_shape_symbolic(): + bb = relax.BlockBuilder() + n = tir.Var("n", "int64") + c = tir.Var("c", "int64") + c16 = tir.Var("c16", "int64") + iw = tir.Var("iw", "int64") + x0 = relax.Var("x", R.Tensor((n, c, iw), "float32")) + x1 = relax.Var("x", R.Tensor((n, c, iw, c16), "float32")) + + _check_inference( + bb, + relax.op.nn.avg_pool1d(x0, pool_size=3, strides=3, padding=2, dilation=2), + relax.TensorStructInfo( + ( + n, + c, + tvm.tir.floordiv(iw - 1, 3) + 1, + ), + "float32", + ), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x1, layout="NCW16c", out_layout="NWC"), + relax.TensorStructInfo((n, iw, c * 16), "float32"), + ) + + +def test_avg_pool1d_infer_struct_info_shape_var(): + bb = relax.BlockBuilder() + s0 = relax.Var("s", relax.ShapeStructInfo(ndim=3)) + s1 = relax.Var("s", relax.ShapeStructInfo(ndim=4)) + s2 = relax.Var("s", relax.ShapeStructInfo()) + x0 = relax.Var("x", relax.TensorStructInfo(s0, "float32")) + x1 = relax.Var("x", relax.TensorStructInfo(s1, "float32")) + x2 = relax.Var("x", relax.TensorStructInfo(s2, "float32")) + + _check_inference( + bb, relax.op.nn.avg_pool1d(x0), 
relax.TensorStructInfo(dtype="float32", ndim=3) + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x1, layout="NCW16c"), + relax.TensorStructInfo(dtype="float32", ndim=4), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x2), + relax.TensorStructInfo(dtype="float32", ndim=3), + ) + + +def test_avg_pool1d_infer_struct_info_ceil_mode(): + bb = relax.BlockBuilder() + x = relax.Var("x", R.Tensor((2, 3, 32), "float32")) + + _check_inference( + bb, + relax.op.nn.avg_pool1d(x, pool_size=3, strides=2, ceil_mode=True), + relax.TensorStructInfo((2, 3, 16), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool1d(x, pool_size=5, strides=2, ceil_mode=True), + relax.TensorStructInfo((2, 3, 15), "float32"), + ) + + +def test_avg_pool1d_infer_struct_info_ceil_mode_symbolic(): + bb = relax.BlockBuilder() + n = tir.Var("n", "int64") + c = tir.Var("c", "int64") + iw = tir.Var("iw", "int64") + x = relax.Var("x", R.Tensor((n, c, iw), "float32")) + + _check_inference( + bb, + relax.op.nn.avg_pool1d(x, pool_size=3, strides=2, padding=1, dilation=2, ceil_mode=True), + relax.TensorStructInfo( + (n, c, tvm.tir.floordiv(iw, 2)), + "float32", + ), + ) + + +def test_avg_pool1d_infer_struct_info_more_input_dtype(): + bb = relax.BlockBuilder() + x0 = relax.Var("x", R.Tensor((2, 3, 32), "float16")) + x1 = relax.Var("x", R.Tensor((2, 3, 32), "int8")) + x2 = relax.Var("x", R.Tensor((2, 3, 32), "int64")) + _check_inference(bb, relax.op.nn.avg_pool1d(x0), relax.TensorStructInfo((2, 3, 32), "float16")) + _check_inference(bb, relax.op.nn.avg_pool1d(x1), relax.TensorStructInfo((2, 3, 32), "int8")) + _check_inference(bb, relax.op.nn.avg_pool1d(x2), relax.TensorStructInfo((2, 3, 32), "int64")) + + +def test_avg_pool1d_stride_padding_dilation_int64(): + x = relax.Var("x", R.Tensor((2, 3, 28), "float32")) + avg_pool1d = relax.op.nn.avg_pool1d(x, 3, strides=1, padding=1, dilation=1) + + assert avg_pool1d.attrs.strides[0].dtype == "int64" + assert avg_pool1d.attrs.padding[0].dtype == "int64" + assert avg_pool1d.attrs.padding[1].dtype == "int64" + assert avg_pool1d.attrs.dilation[0].dtype == "int64" + + +def test_avg_pool1d_wrong_pool_size_strides_padding_dilation_length(): + x = relax.Var("x", R.Tensor((2, 3, 28), "float32")) + with pytest.raises(TVMError): + relax.op.nn.avg_pool1d(x, pool_size=(1, 2)) + with pytest.raises(TVMError): + relax.op.nn.avg_pool1d(x, strides=(1, 2)) + with pytest.raises(TVMError): + relax.op.nn.avg_pool1d(x, padding=(1, 2, 3)) + with pytest.raises(TVMError): + relax.op.nn.avg_pool1d(x, dilation=(1, 2)) + + +def test_avg_pool1d_infer_struct_info_wrong_layout_string(): + bb = relax.BlockBuilder() + x = relax.Var("x", R.Tensor((2, 3, 28), "float32")) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool1d(x, layout="OIW")) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool1d(x, out_layout="OWI")) + + +def test_avg_pool1d_wrong_input_ndim(): + bb = relax.BlockBuilder() + x0 = relax.Var("x", R.Tensor((2, 3, 28, 28), "float32")) + x1 = relax.Var("x", R.Tensor("float32", ndim=2)) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool1d(x0)) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool1d(x1)) + + +def test_avg_pool1d_infer_struct_info_wrong_input_type(): + bb = relax.BlockBuilder() + x0 = relax.Var("x", relax.ShapeStructInfo((2, 3, 28))) + x1 = relax.Var("x", relax.FuncStructInfo([], R.Tensor((2, 3, 28), "float32"))) + + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool1d(x0)) + with pytest.raises(TVMError): + 
bb.normalize(relax.op.nn.avg_pool1d(x1)) + + def test_avg_pool2d_infer_struct_info(): bb = relax.BlockBuilder() vdev0 = VDevice("llvm") @@ -943,6 +1151,253 @@ def test_avg_pool2d_infer_struct_info_wrong_input_type(): bb.normalize(relax.op.nn.avg_pool2d(x1)) +def test_avg_pool3d_infer_struct_info(): + bb = relax.BlockBuilder() + vdev0 = VDevice("llvm") + + x0 = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "float32")) + x1 = relax.Var("x", R.Tensor((2, 32, 32, 32, 3), "float32")) + x2 = relax.Var("x", R.Tensor("float32", ndim=5)) + x3 = relax.Var("x", R.Tensor("float32")) + x4 = relax.Var("x", R.Tensor(ndim=5)) + x5 = relax.Var("x", R.Tensor()) + x6 = relax.Var("x", R.Tensor((2, 4, 32, 32, 16), "float32")) + x7 = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "float32", vdev0)) + + _check_inference( + bb, relax.op.nn.avg_pool3d(x0), relax.TensorStructInfo((2, 3, 32, 32, 32), "float32") + ) + _check_inference( + bb, relax.op.nn.avg_pool3d(x7), relax.TensorStructInfo((2, 3, 32, 32, 32), "float32", vdev0) + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x0, pool_size=3), + relax.TensorStructInfo((2, 3, 30, 30, 30), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x0, pool_size=(5, 3, 3)), + relax.TensorStructInfo((2, 3, 28, 30, 30), "float32"), + ) + _check_inference( + bb, relax.op.nn.avg_pool3d(x0, padding=1), relax.TensorStructInfo((2, 3, 34, 34, 34), "float32") + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x0, padding=[1, 2, 3]), + relax.TensorStructInfo((2, 3, 34, 36, 35), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x0, strides=2), + relax.TensorStructInfo((2, 3, 16, 16, 16), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x0, dilation=2), + relax.TensorStructInfo((2, 3, 32, 32, 32), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x1, layout="NCDHW"), + relax.TensorStructInfo((2, 32, 32, 32, 3), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x0, out_layout="NCDHW"), + relax.TensorStructInfo((2, 32, 32, 32, 3), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x6, layout="NCDHW16c", out_layout="NHWC16c"), + relax.TensorStructInfo((2, 32, 32, 32, 16), "float32"), + ) + _check_inference( + bb, relax.op.nn.avg_pool3d(x2), relax.TensorStructInfo(dtype="float32", ndim=5) + ) + _check_inference( + bb, relax.op.nn.avg_pool3d(x3), relax.TensorStructInfo(dtype="float32", ndim=5) + ) + _check_inference(bb, relax.op.nn.avg_pool3d(x4), relax.TensorStructInfo(dtype="", ndim=5)) + _check_inference(bb, relax.op.nn.avg_pool3d(x5), relax.TensorStructInfo(dtype="", ndim=5)) + + +def test_avg_pool3d_infer_struct_info_shape_symbolic(): + bb = relax.BlockBuilder() + n = tir.Var("n", "int64") + c = tir.Var("c", "int64") + c16 = tir.Var("c16", "int64") + id_ = tir.Var("id", "int64") + ih = tir.Var("ih", "int64") + iw = tir.Var("iw", "int64") + x0 = relax.Var("x", R.Tensor((n, c, id_, ih, iw), "float32")) + x1 = relax.Var("x", R.Tensor((n, c, id_, ih, iw, c16), "float32")) + + _check_inference( + bb, + relax.op.nn.avg_pool3d( + x0, pool_size=(3, 3, 3), strides=(3, 3, 3), padding=(2, 2, 2), dilation=(2, 2, 2) + ), + relax.TensorStructInfo( + ( + n, + c, + tvm.tir.floordiv(id_ - 1, 3) + 1, + tvm.tir.floordiv(ih - 1, 3) + 1, + tvm.tir.floordiv(iw - 1, 3) + 1, + ), + "float32", + ), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x1, layout="NCDHW16c", out_layout="NHWC"), + relax.TensorStructInfo((n, id_, ih, iw, c * 16), "float32"), + ) + + +def 
test_avg_pool3d_infer_struct_info_shape_var(): + bb = relax.BlockBuilder() + s0 = relax.Var("s", relax.ShapeStructInfo(ndim=5)) + s1 = relax.Var("s", relax.ShapeStructInfo(ndim=6)) + s2 = relax.Var("s", relax.ShapeStructInfo()) + x0 = relax.Var("x", relax.TensorStructInfo(s0, "float32")) + x1 = relax.Var("x", relax.TensorStructInfo(s1, "float32")) + x2 = relax.Var("x", relax.TensorStructInfo(s2, "float32")) + + _check_inference( + bb, relax.op.nn.avg_pool3d(x0), relax.TensorStructInfo(dtype="float32", ndim=5) + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x1, layout="NCDHW16c"), + relax.TensorStructInfo(dtype="float32", ndim=6), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x2), + relax.TensorStructInfo(dtype="float32", ndim=5), + ) + + +def test_avg_pool3d_infer_struct_info_ceil_mode(): + bb = relax.BlockBuilder() + x = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "float32")) + + _check_inference( + bb, + relax.op.nn.avg_pool3d(x, pool_size=3, strides=2, ceil_mode=True), + relax.TensorStructInfo((2, 3, 16, 16, 16), "float32"), + ) + _check_inference( + bb, + relax.op.nn.avg_pool3d(x, pool_size=(5, 3, 3), strides=2, ceil_mode=True), + relax.TensorStructInfo((2, 3, 15, 16, 16), "float32"), + ) + + +def test_avg_pool3d_infer_struct_info_ceil_mode_symbolic(): + bb = relax.BlockBuilder() + n = tir.Var("n", "int64") + c = tir.Var("c", "int64") + id_ = tir.Var("id", "int64") + ih = tir.Var("ih", "int64") + iw = tir.Var("iw", "int64") + x = relax.Var("x", R.Tensor((n, c, id_, ih, iw), "float32")) + + _check_inference( + bb, + relax.op.nn.avg_pool3d( + x, pool_size=(3, 3, 3), strides=(2, 2, 2), padding=(1, 1, 1), dilation=(2, 2, 2), ceil_mode=True + ), + relax.TensorStructInfo( + ( + n, + c, + tvm.tir.floordiv(id_, 2), + tvm.tir.floordiv(ih, 2), + tvm.tir.floordiv(iw, 2), + ), + "float32", + ), + ) + + +def test_avg_pool3d_infer_struct_info_more_input_dtype(): + bb = relax.BlockBuilder() + x0 = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "float16")) + x1 = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "int8")) + x2 = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "int64")) + + _check_inference( + bb, relax.op.nn.avg_pool3d(x0), relax.TensorStructInfo((2, 3, 32, 32, 32), "float16") + ) + _check_inference( + bb, relax.op.nn.avg_pool3d(x1), relax.TensorStructInfo((2, 3, 32, 32, 32), "int8") + ) + _check_inference( + bb, relax.op.nn.avg_pool3d(x2), relax.TensorStructInfo((2, 3, 32, 32, 32), "int64") + ) + + +def test_avg_pool3d_stride_padding_dilation_int64(): + x = relax.Var("x", R.Tensor((2, 3, 28, 28, 28), "float32")) + avg_pool3d = relax.op.nn.avg_pool3d(x, (3, 3, 3), strides=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1)) + + assert avg_pool3d.attrs.strides[0].dtype == "int64" + assert avg_pool3d.attrs.strides[1].dtype == "int64" + assert avg_pool3d.attrs.strides[2].dtype == "int64" + assert avg_pool3d.attrs.padding[0].dtype == "int64" + assert avg_pool3d.attrs.padding[1].dtype == "int64" + assert avg_pool3d.attrs.padding[2].dtype == "int64" + assert avg_pool3d.attrs.dilation[0].dtype == "int64" + assert avg_pool3d.attrs.dilation[1].dtype == "int64" + assert avg_pool3d.attrs.dilation[2].dtype == "int64" + + +def test_avg_pool3d_wrong_pool_size_strides_padding_dilation_length(): + x = relax.Var("x", R.Tensor((2, 3, 28, 28, 28), "float32")) + with pytest.raises(TVMError): + relax.op.nn.avg_pool3d(x, pool_size=(1, 2, 3, 4)) + with pytest.raises(TVMError): + relax.op.nn.avg_pool3d(x, strides=(1, 2, 3, 4)) + with pytest.raises(TVMError): + relax.op.nn.avg_pool3d(x, padding=(1, 2, 3, 4)) + 
with pytest.raises(TVMError): + relax.op.nn.avg_pool3d(x, dilation=(1, 2, 3, 4)) + + +def test_avg_pool3d_infer_struct_info_wrong_layout_string(): + bb = relax.BlockBuilder() + x = relax.Var("x", R.Tensor((2, 3, 28, 28, 28), "float32")) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool3d(x, layout="OIHW")) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool3d(x, out_layout="OHWI")) + + +def test_avg_pool3d_wrong_input_ndim(): + bb = relax.BlockBuilder() + x0 = relax.Var("x", R.Tensor((2, 3, 28, 28, 28, 3), "float32")) + x1 = relax.Var("x", R.Tensor("float32", ndim=4)) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool3d(x0)) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool3d(x1)) + + +def test_avg_pool3d_infer_struct_info_wrong_input_type(): + bb = relax.BlockBuilder() + x0 = relax.Var("x", relax.ShapeStructInfo((2, 3, 28, 28, 28))) + x1 = relax.Var("x", relax.FuncStructInfo([], R.Tensor((2, 3, 28, 28, 28), "float32"))) + + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool3d(x0)) + with pytest.raises(TVMError): + bb.normalize(relax.op.nn.avg_pool3d(x1)) + + def test_adaptive_avg_pool1d_infer_struct_info(): bb = relax.BlockBuilder() vdev0 = VDevice("llvm") From 5b292a02754911320d42abd41c4368124a0d1ea7 Mon Sep 17 00:00:00 2001 From: deivanayakisankaralingam Date: Tue, 6 May 2025 09:06:32 +0000 Subject: [PATCH 2/4] fix lint format and test script issues --- .../test_frontend_from_exported_program.py | 30 +++++++++++------- tests/python/relax/test_frontend_from_fx.py | 31 +++++++++++++------ tests/python/relax/test_op_nn_pooling.py | 19 +++++++++--- 3 files changed, 55 insertions(+), 25 deletions(-) diff --git a/tests/python/relax/test_frontend_from_exported_program.py b/tests/python/relax/test_frontend_from_exported_program.py index b29b5d44b46e..a1ad180ea6d9 100644 --- a/tests/python/relax/test_frontend_from_exported_program.py +++ b/tests/python/relax/test_frontend_from_exported_program.py @@ -1379,14 +1379,18 @@ def forward(self, input): @tvm.script.ir_module class expected1: @R.function - def main(input_1: R.Tensor((1, 3, 10), dtype="float32")) -> R.Tuple(R.Tensor((1, 3, 10), dtype="float32")): + def main( + input_1: R.Tensor((1, 3, 10), dtype="float32") + ) -> R.Tuple(R.Tensor((1, 3, 10), dtype="float32")): with R.dataflow(): lv: R.Tensor((1, 3, 10), dtype="float32") = R.nn.avg_pool1d( input_1, pool_size=[1], strides=[1], dilation=[1], - padding=[0, 0], ceil_mode=False, count_include_pad=True, + padding=[0, 0], + ceil_mode=False, + count_include_pad=True, layout="NCW", out_layout="NCW", ) @@ -1430,9 +1434,7 @@ def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): class AvgPool1d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool1d( - input, kernel_size=2, stride=2, padding=0 - ) + return torch.nn.functional.avg_pool1d(input, kernel_size=2, stride=2, padding=0) @tvm.script.ir_module class expected3: @@ -1526,7 +1528,7 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): class AvgPool2d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1], divisor_override=2) + return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1]) @tvm.script.ir_module class expected3: @@ -1566,14 +1568,18 @@ def forward(self, input): @tvm.script.ir_module class expected1: @R.function - def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")) -> R.Tuple(R.Tensor((1, 3, 8, 8, 8), dtype="float32")): + def main( + input_1: R.Tensor((1, 3, 
8, 8, 8), dtype="float32") + ) -> R.Tuple(R.Tensor((1, 3, 8, 8, 8), dtype="float32")): with R.dataflow(): lv: R.Tensor((1, 3, 8, 8, 8), dtype="float32") = R.nn.avg_pool3d( input_1, pool_size=[1, 1, 1], strides=[1, 1, 1], dilation=[1, 1, 1], - padding=[0, 0, 0, 0, 0, 0],ceil_mode=False, count_include_pad=True, + padding=[0, 0, 0, 0, 0, 0], + ceil_mode=False, + count_include_pad=True, layout="NCDHW", out_layout="NCDHW", ) @@ -1606,7 +1612,8 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): strides=[2, 2, 2], dilation=[1, 1, 1], padding=[1, 1, 1, 1, 1, 1], - ceil_mode=True,count_include_pad=True, + ceil_mode=True, + count_include_pad=True, layout="NCDHW", out_layout="NCDHW", ) @@ -1617,7 +1624,7 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): class AvgPool3d4(Module): def forward(self, input): return torch.nn.functional.avg_pool3d( - input, kernel_size=[2, 1, 2], stride=[2, 1, 2], divisor_override=4 + input, kernel_size=[2, 1, 2], stride=[2, 1, 2] ) @tvm.script.ir_module @@ -1631,7 +1638,8 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): strides=[2, 1, 2], dilation=[1, 1, 1], padding=[0, 0, 0, 0, 0, 0], - ceil_mode=False,count_include_pad=True, + ceil_mode=False, + count_include_pad=True, layout="NCDHW", out_layout="NCDHW", ) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 27e4c55d3fc9..c12e6a088282 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1244,14 +1244,18 @@ def __init__(self): @tvm.script.ir_module class expected1: @R.function - def main(input_1: R.Tensor((1, 3, 10), dtype="float32")) -> R.Tensor((1, 3, 10), dtype="float32"): + def main( + input_1: R.Tensor((1, 3, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10), dtype="float32"): with R.dataflow(): lv = R.nn.avg_pool1d( input_1, pool_size=[1], strides=[1], dilation=[1], - padding=[0, 0], ceil_mode=False, count_include_pad=True, + padding=[0, 0], + ceil_mode=False, + count_include_pad=True, layout="NCW", out_layout="NCW", ) @@ -1314,7 +1318,8 @@ def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): strides=[2], dilation=[1], padding=[2, 2], - ceil_mode=True,count_include_pad=True, + ceil_mode=True, + count_include_pad=True, layout="NCW", out_layout="NCW", ) @@ -1361,7 +1366,8 @@ def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): strides=[2], dilation=[1], padding=[0, 0], - ceil_mode=False, count_include_pad=True, + ceil_mode=False, + count_include_pad=True, layout="NCW", out_layout="NCW", ) @@ -1443,7 +1449,7 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): class AvgPool2d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1], divisor_override=2) + return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1]) @tvm.script.ir_module class expected3: @@ -1516,7 +1522,9 @@ def main( @tvm.script.ir_module class expected1: @R.function - def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")) -> R.Tensor((1, 3, 8, 8, 8), dtype="float32"): + def main( + input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32") + ) -> R.Tensor((1, 3, 8, 8, 8), dtype="float32"): with R.dataflow(): lv: R.Tensor((1, 3, 8, 8, 8), dtype="float32") = R.nn.avg_pool3d( input_1, @@ -1524,7 +1532,8 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")) -> R.Tensor((1, 3, strides=[1, 1, 1], dilation=[1, 1, 1], padding=[0, 0, 0, 0, 0, 0], - ceil_mode=False, count_include_pad=True, + ceil_mode=False, + 
count_include_pad=True, layout="NCDHW", out_layout="NCDHW", ) @@ -1535,7 +1544,9 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")) -> R.Tensor((1, 3, class AvgPool3d2(Module): def __init__(self): super().__init__() - self.pool = torch.nn.AvgPool3d(kernel_size=[3, 3, 3], stride=2, padding=1, ceil_mode=True) + self.pool = torch.nn.AvgPool3d( + kernel_size=[3, 3, 3], stride=2, padding=1, ceil_mode=True + ) def forward(self, input): return self.pool(input) @@ -1568,7 +1579,9 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): class AvgPool3d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool3d(input, kernel_size=[2, 1, 2], stride=[2, 1, 2], divisor_override=4) + return torch.nn.functional.avg_pool3d( + input, kernel_size=[2, 1, 2], stride=[2, 1, 2] + ) @tvm.script.ir_module class expected3: diff --git a/tests/python/relax/test_op_nn_pooling.py b/tests/python/relax/test_op_nn_pooling.py index 107be0c7af0e..2399cfbabf8e 100644 --- a/tests/python/relax/test_op_nn_pooling.py +++ b/tests/python/relax/test_op_nn_pooling.py @@ -1181,12 +1181,14 @@ def test_avg_pool3d_infer_struct_info(): relax.TensorStructInfo((2, 3, 28, 30, 30), "float32"), ) _check_inference( - bb, relax.op.nn.avg_pool3d(x0, padding=1), relax.TensorStructInfo((2, 3, 34, 34, 34), "float32") + bb, + relax.op.nn.avg_pool3d(x0, padding=1), + relax.TensorStructInfo((2, 3, 34, 34, 34), "float32"), ) _check_inference( bb, relax.op.nn.avg_pool3d(x0, padding=[1, 2, 3]), - relax.TensorStructInfo((2, 3, 34, 36, 35), "float32"), + relax.TensorStructInfo((2, 3, 34, 36, 38), "float32"), ) _check_inference( bb, @@ -1252,7 +1254,7 @@ def test_avg_pool3d_infer_struct_info_shape_symbolic(): ) _check_inference( bb, - relax.op.nn.avg_pool3d(x1, layout="NCDHW16c", out_layout="NHWC"), + relax.op.nn.avg_pool3d(x1, layout="NCDHW16c", out_layout="NDHWC"), relax.TensorStructInfo((n, id_, ih, iw, c * 16), "float32"), ) @@ -1309,7 +1311,12 @@ def test_avg_pool3d_infer_struct_info_ceil_mode_symbolic(): _check_inference( bb, relax.op.nn.avg_pool3d( - x, pool_size=(3, 3, 3), strides=(2, 2, 2), padding=(1, 1, 1), dilation=(2, 2, 2), ceil_mode=True + x, + pool_size=(3, 3, 3), + strides=(2, 2, 2), + padding=(1, 1, 1), + dilation=(2, 2, 2), + ceil_mode=True, ), relax.TensorStructInfo( ( @@ -1343,7 +1350,9 @@ def test_avg_pool3d_infer_struct_info_more_input_dtype(): def test_avg_pool3d_stride_padding_dilation_int64(): x = relax.Var("x", R.Tensor((2, 3, 28, 28, 28), "float32")) - avg_pool3d = relax.op.nn.avg_pool3d(x, (3, 3, 3), strides=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1)) + avg_pool3d = relax.op.nn.avg_pool3d( + x, (3, 3, 3), strides=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1) + ) assert avg_pool3d.attrs.strides[0].dtype == "int64" assert avg_pool3d.attrs.strides[1].dtype == "int64" From 294b92106d50112844a09ab00855e317c577c2bb Mon Sep 17 00:00:00 2001 From: deivanayakisankaralingam Date: Tue, 6 May 2025 10:12:04 +0000 Subject: [PATCH 3/4] update test script to avoid unity issues --- tests/python/relax/test_op_nn_pooling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/relax/test_op_nn_pooling.py b/tests/python/relax/test_op_nn_pooling.py index 2399cfbabf8e..92bf63843240 100644 --- a/tests/python/relax/test_op_nn_pooling.py +++ b/tests/python/relax/test_op_nn_pooling.py @@ -1208,7 +1208,7 @@ def test_avg_pool3d_infer_struct_info(): _check_inference( bb, relax.op.nn.avg_pool3d(x0, out_layout="NCDHW"), - relax.TensorStructInfo((2, 32, 32, 32, 3), "float32"), + 
relax.TensorStructInfo((2, 3, 32, 32, 32), "float32"), ) _check_inference( bb, From 32f9398fd65da1c9826f933317cfddc3c65a9758 Mon Sep 17 00:00:00 2001 From: deivanayakisankaralingam Date: Fri, 9 May 2025 08:54:42 +0000 Subject: [PATCH 4/4] fix formatting and conflict issues --- .../torch/base_fx_graph_translator.py | 10 +- .../tvm/relax/frontend/torch/fx_translator.py | 3 - python/tvm/relax/op/nn/nn.py | 2 +- .../test_frontend_from_exported_program.py | 6 +- tests/python/relax/test_frontend_from_fx.py | 202 +++++++++--------- tests/python/relax/test_op_nn_pooling.py | 8 +- 6 files changed, 115 insertions(+), 116 deletions(-) diff --git a/python/tvm/relax/frontend/torch/base_fx_graph_translator.py b/python/tvm/relax/frontend/torch/base_fx_graph_translator.py index 34694bf5e32d..f789eb8af35b 100644 --- a/python/tvm/relax/frontend/torch/base_fx_graph_translator.py +++ b/python/tvm/relax/frontend/torch/base_fx_graph_translator.py @@ -589,7 +589,7 @@ def _avg_pool1d_impl( ceil_mode: Optional[bool] = False, count_include_pad: Optional[bool] = True, ) -> relax.Var: - + # Expand to 3D by adding batch dim if input is 2D x_ndim = x.struct_info.ndim if x_ndim == 2: x = relax.op.expand_dims(x, axis=0) @@ -606,7 +606,7 @@ def _avg_pool1d_impl( layout="NCW", ) ) - + # Remove added batch dim from result if x_ndim == 2: result = relax.op.squeeze(result, axis=[0]) return result @@ -630,6 +630,7 @@ def _avg_pool2d_impl( padding: Optional[int] = 0, ceil_mode: Optional[bool] = False, ) -> relax.Var: + # Expand to 4D by adding batch dim if input is 3D x_ndim = x.struct_info.ndim if x_ndim == 3: x = relax.op.expand_dims(x, axis=0) @@ -645,7 +646,7 @@ def _avg_pool2d_impl( layout="NCHW", ) ) - + # Remove added batch dim from result if x_ndim == 3: result = relax.op.squeeze(result, axis=[0]) return result @@ -668,6 +669,7 @@ def _avg_pool3d_impl( ceil_mode: Optional[bool] = False, count_include_pad: Optional[bool] = True, ) -> relax.Var: + # Expand to 5D by adding batch dim if input is 4D x_ndim = x.struct_info.ndim if x_ndim == 4: x = relax.op.expand_dims(x, axis=0) @@ -684,7 +686,7 @@ def _avg_pool3d_impl( layout="NCDHW", ) ) - + # Remove added batch dim from result if x_ndim == 4: result = relax.op.squeeze(result, axis=[0]) return result diff --git a/python/tvm/relax/frontend/torch/fx_translator.py b/python/tvm/relax/frontend/torch/fx_translator.py index cb3816879b24..0e8814dd974e 100644 --- a/python/tvm/relax/frontend/torch/fx_translator.py +++ b/python/tvm/relax/frontend/torch/fx_translator.py @@ -728,11 +728,8 @@ def create_convert_map( # neural network nn.AdaptiveAvgPool1d: self._adaptive_avg_pool1d_module, nn.AdaptiveAvgPool2d: self._adaptive_avg_pool2d_module, -<<<<<<< HEAD nn.AdaptiveAvgPool3d: self._adaptive_avg_pool3d_module, -======= nn.AvgPool1d: self._avg_pool1d_module, ->>>>>>> 88cf23de1 (add avg pool 1d and 3d op mappings and added test script) nn.AvgPool2d: self._avg_pool2d_module, nn.AvgPool3d: self._avg_pool3d_module, nn.BatchNorm2d: self._batch_norm_2d_module, diff --git a/python/tvm/relax/op/nn/nn.py b/python/tvm/relax/op/nn/nn.py index 44da895eaca5..b68d488e26df 100644 --- a/python/tvm/relax/op/nn/nn.py +++ b/python/tvm/relax/op/nn/nn.py @@ -920,7 +920,7 @@ def avg_pool2d( padding: Union[int, Tuple[int, ...]] = (0, 0), dilation: Union[int, Tuple[int, int]] = (1, 1), ceil_mode: bool = False, - count_include_pad: bool = True, + count_include_pad: bool = False, layout: str = "NCHW", out_layout: Optional[str] = None, ) -> Expr: diff --git 
a/tests/python/relax/test_frontend_from_exported_program.py b/tests/python/relax/test_frontend_from_exported_program.py index a1ad180ea6d9..b0aebff7049b 100644 --- a/tests/python/relax/test_frontend_from_exported_program.py +++ b/tests/python/relax/test_frontend_from_exported_program.py @@ -1528,7 +1528,7 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): class AvgPool2d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1]) + return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1], divisor_override=2) @tvm.script.ir_module class expected3: @@ -1623,9 +1623,7 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): class AvgPool3d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool3d( - input, kernel_size=[2, 1, 2], stride=[2, 1, 2] - ) + return torch.nn.functional.avg_pool3d(input, kernel_size=[2, 1, 2], stride=[2, 1, 2]) @tvm.script.ir_module class expected3: diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index c12e6a088282..53efab4e80cc 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1187,7 +1187,6 @@ def main( verify_model(MaxPool2d3(), input_info, {}, expected3) -<<<<<<< HEAD def test_maxpool3d(): input_info = [([1, 3, 10, 10, 10], "float32")] @@ -1195,20 +1194,10 @@ class MaxPool3d(Module): def __init__(self): super().__init__() self.pool = torch.nn.MaxPool3d(kernel_size=[1, 1, 1]) -======= -def test_avgpool1d(): - input_info = [([1, 3, 10], "float32")] - - class AvgPool1d(Module): - def __init__(self): - super().__init__() - self.pool = torch.nn.AvgPool1d(kernel_size=1) ->>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) def forward(self, input): return self.pool(input) -<<<<<<< HEAD class MaxPool3d_functional(Module): def __init__(self): super().__init__() @@ -1240,67 +1229,105 @@ class MaxPool3d2(Module): def __init__(self): super().__init__() self.pool = torch.nn.MaxPool3d(kernel_size=[2, 2, 2], dilation=[1, 2, 2]) -======= + + def forward(self, input): + return self.pool(input) + @tvm.script.ir_module - class expected1: + class expected2: @R.function def main( - input_1: R.Tensor((1, 3, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10), dtype="float32"): + input_1: R.Tensor((1, 3, 10, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 5, 4, 4), dtype="float32"): # Fixed here with R.dataflow(): - lv = R.nn.avg_pool1d( + lv: R.Tensor((1, 3, 5, 4, 4), dtype="float32") = R.nn.max_pool3d( input_1, - pool_size=[1], - strides=[1], - dilation=[1], - padding=[0, 0], - ceil_mode=False, - count_include_pad=True, - layout="NCW", - out_layout="NCW", + pool_size=[2, 2, 2], + strides=[2, 2, 2], + dilation=[1, 2, 2], + padding=[0, 0, 0, 0, 0, 0], + layout="NCDHW", + out_layout="NCDHW", ) - gv = lv + gv: R.Tensor((1, 3, 5, 4, 4), dtype="float32") = lv R.output(gv) return gv - class AvgPool1d2(Module): + class MaxPool3d3(Module): def __init__(self): super().__init__() - self.pool = torch.nn.AvgPool1d(kernel_size=4, stride=2, padding=2, ceil_mode=True) ->>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) + self.pool = torch.nn.MaxPool3d(kernel_size=[3, 3, 3], padding=1, stride=2) def forward(self, input): return self.pool(input) -<<<<<<< HEAD @tvm.script.ir_module - class expected2: + class expected3: @R.function def main( input_1: R.Tensor((1, 3, 10, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 5, 4, 4), dtype="float32"): # Fixed 
here + ) -> R.Tensor((1, 3, 5, 5, 5), dtype="float32"): with R.dataflow(): - lv: R.Tensor((1, 3, 5, 4, 4), dtype="float32") = R.nn.max_pool3d( + lv: R.Tensor((1, 3, 5, 5, 5), dtype="float32") = R.nn.max_pool3d( input_1, - pool_size=[2, 2, 2], + pool_size=[3, 3, 3], strides=[2, 2, 2], - dilation=[1, 2, 2], - padding=[0, 0, 0, 0, 0, 0], + dilation=[1, 1, 1], + padding=[1, 1, 1, 1, 1, 1], layout="NCDHW", out_layout="NCDHW", ) - gv: R.Tensor((1, 3, 5, 4, 4), dtype="float32") = lv + gv: R.Tensor((1, 3, 5, 5, 5), dtype="float32") = lv R.output(gv) return gv - class MaxPool3d3(Module): + verify_model(MaxPool3d(), input_info, {}, expected1) + verify_model(MaxPool3d_functional(), input_info, {}, expected1) + verify_model(MaxPool3d2(), input_info, {}, expected2) + verify_model(MaxPool3d3(), input_info, {}, expected3) + + +def test_avgpool1d(): + input_info = [([1, 3, 10], "float32")] + + class AvgPool1d(Module): def __init__(self): super().__init__() - self.pool = torch.nn.MaxPool3d(kernel_size=[3, 3, 3], padding=1, stride=2) + self.pool = torch.nn.AvgPool1d(kernel_size=1) + + def forward(self, input): + return self.pool(input) + + @tvm.script.ir_module + class expected1: + @R.function + def main( + input_1: R.Tensor((1, 3, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10), dtype="float32"): + with R.dataflow(): + lv = R.nn.avg_pool1d( + input_1, + pool_size=[1], + strides=[1], + dilation=[1], + padding=[0, 0], + ceil_mode=False, + count_include_pad=True, + layout="NCW", + out_layout="NCW", + ) + gv = lv + R.output(gv) + return gv + + class AvgPool1d2(Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AvgPool1d(kernel_size=4, stride=2, padding=2, ceil_mode=True) def forward(self, input): return self.pool(input) -======= + class AvgPool1d3(Module): def forward(self, input): return torch.nn.functional.avg_pool1d( @@ -1330,34 +1357,10 @@ def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): class AvgPool1d4(Module): def forward(self, input): return torch.nn.functional.avg_pool1d(input, kernel_size=2) ->>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) @tvm.script.ir_module class expected3: @R.function -<<<<<<< HEAD - def main( - input_1: R.Tensor((1, 3, 10, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 5, 5, 5), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 5, 5, 5), dtype="float32") = R.nn.max_pool3d( - input_1, - pool_size=[3, 3, 3], - strides=[2, 2, 2], - dilation=[1, 1, 1], - padding=[1, 1, 1, 1, 1, 1], - layout="NCDHW", - out_layout="NCDHW", - ) - gv: R.Tensor((1, 3, 5, 5, 5), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(MaxPool3d(), input_info, {}, expected1) - verify_model(MaxPool3d_functional(), input_info, {}, expected1) - verify_model(MaxPool3d2(), input_info, {}, expected2) - verify_model(MaxPool3d3(), input_info, {}, expected3) -======= def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): with R.dataflow(): lv = R.nn.avg_pool1d( @@ -1379,7 +1382,6 @@ def main(input_1: R.Tensor((1, 3, 10), dtype="float32")): verify_model(AvgPool1d2(), input_info, {}, expected2) verify_model(AvgPool1d3(), input_info, {}, expected2) verify_model(AvgPool1d4(), input_info, {}, expected3) ->>>>>>> d7440a840 (add avg pool 1d and 3d op mappings and added test script) def test_avgpool2d(): @@ -1449,7 +1451,7 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): class AvgPool2d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1]) + return 
torch.nn.functional.avg_pool2d(input, kernel_size=[2, 1], divisor_override=2) @tvm.script.ir_module class expected3: @@ -1476,15 +1478,6 @@ def main(input_1: R.Tensor((1, 3, 10, 10), dtype="float32")): verify_model(AvgPool2d4(), input_info, {}, expected3) -<<<<<<< HEAD -def test_adaptive_avgpool1d(): - input_info = [([1, 3, 16], "float32")] - - class AdaptiveAvgPool1d0(torch.nn.Module): - def __init__(self): - super().__init__() - self.pool = torch.nn.AdaptiveAvgPool1d(8) -======= def test_avgpool3d(): input_info = [([1, 3, 8, 8, 8], "float32")] @@ -1492,33 +1485,10 @@ class AvgPool3d(Module): def __init__(self): super().__init__() self.pool = torch.nn.AvgPool3d(kernel_size=[1, 1, 1]) ->>>>>>> 88cf23de1 (add avg pool 1d and 3d op mappings and added test script) def forward(self, input): return self.pool(input) -<<<<<<< HEAD - class AdaptiveAvgPool1d1(torch.nn.Module): - def forward(self, input): - return torch.nn.functional.adaptive_avg_pool1d(input, 8) - - @tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((1, 3, 16), dtype="float32") - ) -> R.Tensor((1, 3, 8), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 8), dtype="float32") = R.nn.adaptive_avg_pool1d( - input_1, output_size=[8], layout="NCW", out_layout="NCW" - ) - gv: R.Tensor((1, 3, 8), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(AdaptiveAvgPool1d0(), input_info, {}, expected1) - verify_model(AdaptiveAvgPool1d1(), input_info, {}, expected1) -======= @tvm.script.ir_module class expected1: @R.function @@ -1579,9 +1549,7 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): class AvgPool3d4(Module): def forward(self, input): - return torch.nn.functional.avg_pool3d( - input, kernel_size=[2, 1, 2], stride=[2, 1, 2] - ) + return torch.nn.functional.avg_pool3d(input, kernel_size=[2, 1, 2], stride=[2, 1, 2]) @tvm.script.ir_module class expected3: @@ -1607,7 +1575,39 @@ def main(input_1: R.Tensor((1, 3, 8, 8, 8), dtype="float32")): verify_model(AvgPool3d2(), input_info, {}, expected2) verify_model(AvgPool3d3(), input_info, {}, expected2) verify_model(AvgPool3d4(), input_info, {}, expected3) ->>>>>>> 88cf23de1 (add avg pool 1d and 3d op mappings and added test script) + + +def test_adaptive_avgpool1d(): + input_info = [([1, 3, 16], "float32")] + + class AdaptiveAvgPool1d0(torch.nn.Module): + def __init__(self): + super().__init__() + self.pool = torch.nn.AdaptiveAvgPool1d(8) + + def forward(self, input): + return self.pool(input) + + class AdaptiveAvgPool1d1(torch.nn.Module): + def forward(self, input): + return torch.nn.functional.adaptive_avg_pool1d(input, 8) + + @tvm.script.ir_module + class expected1: + @R.function + def main( + input_1: R.Tensor((1, 3, 16), dtype="float32") + ) -> R.Tensor((1, 3, 8), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 8), dtype="float32") = R.nn.adaptive_avg_pool1d( + input_1, output_size=[8], layout="NCW", out_layout="NCW" + ) + gv: R.Tensor((1, 3, 8), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(AdaptiveAvgPool1d0(), input_info, {}, expected1) + verify_model(AdaptiveAvgPool1d1(), input_info, {}, expected1) def test_adaptive_avgpool2d(): diff --git a/tests/python/relax/test_op_nn_pooling.py b/tests/python/relax/test_op_nn_pooling.py index 92bf63843240..d4461a122de8 100644 --- a/tests/python/relax/test_op_nn_pooling.py +++ b/tests/python/relax/test_op_nn_pooling.py @@ -30,7 +30,9 @@ def test_op_correctness(): assert relax.op.nn.max_pool1d(x1).op == Op.get("relax.nn.max_pool1d") assert 
relax.op.nn.max_pool2d(x).op == Op.get("relax.nn.max_pool2d") assert relax.op.nn.max_pool3d(x2).op == Op.get("relax.nn.max_pool3d") + assert relax.op.nn.avg_pool1d(x).op == Op.get("relax.nn.avg_pool1d") assert relax.op.nn.avg_pool2d(x).op == Op.get("relax.nn.avg_pool2d") + assert relax.op.nn.avg_pool3d(x).op == Op.get("relax.nn.avg_pool3d") assert relax.op.nn.adaptive_avg_pool1d(x).op == Op.get("relax.nn.adaptive_avg_pool1d") assert relax.op.nn.adaptive_avg_pool2d(x).op == Op.get("relax.nn.adaptive_avg_pool2d") assert relax.op.nn.adaptive_avg_pool3d(x).op == Op.get("relax.nn.adaptive_avg_pool3d") @@ -1161,7 +1163,7 @@ def test_avg_pool3d_infer_struct_info(): x3 = relax.Var("x", R.Tensor("float32")) x4 = relax.Var("x", R.Tensor(ndim=5)) x5 = relax.Var("x", R.Tensor()) - x6 = relax.Var("x", R.Tensor((2, 4, 32, 32, 16), "float32")) + x6 = relax.Var("x", R.Tensor((2, 4, 32, 32, 32, 16), "float32")) x7 = relax.Var("x", R.Tensor((2, 3, 32, 32, 32), "float32", vdev0)) _check_inference( @@ -1212,8 +1214,8 @@ def test_avg_pool3d_infer_struct_info(): ) _check_inference( bb, - relax.op.nn.avg_pool3d(x6, layout="NCDHW16c", out_layout="NHWC16c"), - relax.TensorStructInfo((2, 32, 32, 32, 16), "float32"), + relax.op.nn.avg_pool3d(x6, layout="NCDHW16c", out_layout="NDHWC16c"), + relax.TensorStructInfo((2, 32, 32, 32, 4, 16), "float32"), ) _check_inference( bb, relax.op.nn.avg_pool3d(x2), relax.TensorStructInfo(dtype="float32", ndim=5)
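
For reference, a minimal end-to-end sketch of the new AvgPool1d mapping, assuming the public `from_fx` entry point that the `verify_model` helpers in the tests above exercise (the module and shapes mirror the `AvgPool1d2` test case; this sketch is illustrative and not part of the patch itself):

import torch
from torch import fx

from tvm.relax.frontend.torch import from_fx


class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Same hyperparameters as the AvgPool1d2 test case above
        self.pool = torch.nn.AvgPool1d(kernel_size=3, stride=2, padding=1, ceil_mode=True)

    def forward(self, x):
        return self.pool(x)


# input_info pairs each input shape with its dtype, following the
# ([shape], dtype) convention used by verify_model in the tests above.
mod = from_fx(fx.symbolic_trace(Model()), [([1, 3, 10], "float32")])
mod.show()  # prints a Relax module whose body calls R.nn.avg_pool1d with
            # pool_size=[3], strides=[2], padding=[1, 1], ceil_mode=True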