diff --git a/tests/lint/pylint.sh b/tests/lint/pylint.sh index e41dc2bb80b8..2b0b8365649d 100755 --- a/tests/lint/pylint.sh +++ b/tests/lint/pylint.sh @@ -29,7 +29,8 @@ python3 -m pylint tests/python/integration/ --rcfile="$(dirname "$0")"/pylintrc # tests/python/contrib/test_hexagon tests python3 -m pylint tests/python/contrib/test_hexagon/*.py --rcfile="$(dirname "$0")"/pylintrc python3 -m pylint tests/python/contrib/test_hexagon/conv2d/*.py --rcfile="$(dirname "$0")"/pylintrc - +python3 -m pylint tests/python/contrib/test_hexagon/topi/*.py --rcfile="$(dirname "$0")"/pylintrc +python3 -m pylint tests/python/contrib/test_hexagon/metaschedule_e2e/*.py --rcfile="$(dirname "$0")"/pylintrc # tests/python/frontend tests python3 -m pylint tests/python/frontend/caffe/test_forward.py --rcfile="$(dirname "$0")"/pylintrc diff --git a/tests/python/contrib/test_hexagon/metaschedule_e2e/export_models.py b/tests/python/contrib/test_hexagon/metaschedule_e2e/export_models.py index 660fbf757284..3e331cbf8ccb 100644 --- a/tests/python/contrib/test_hexagon/metaschedule_e2e/export_models.py +++ b/tests/python/contrib/test_hexagon/metaschedule_e2e/export_models.py @@ -14,6 +14,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +"""Hexagon MetaSchedule test helper functions.""" + import torch from torchvision.models import resnet from torchvision.models.quantization import resnet as qresnet @@ -23,6 +25,7 @@ def export_resnet50_fp16(): + """Export Resnet50 FP16.""" model = resnet.resnet50(pretrained=True).eval() pt_inp = torch.randn(1, 3, 224, 224) @@ -34,14 +37,16 @@ def export_resnet50_fp16(): mod, params = relay.frontend.from_pytorch(script_module, input_shapes) mod = relay.transform.ToMixedPrecision("float16")(mod) - with open("resnet50_fp16.json", "w") as fo: - fo.write(tvm.ir.save_json(mod)) + with open("resnet50_fp16.json", "w") as file: + file.write(tvm.ir.save_json(mod)) - with open("resnet50_fp16.params", "wb") as fo: - fo.write(relay.save_param_dict(params)) + with open("resnet50_fp16.params", "wb") as file: + file.write(relay.save_param_dict(params)) def export_resnet50_int8(): + """Export Resnet50 INT8.""" + def quantize_model(model, inp): model.fuse_model() model.qconfig = torch.quantization.get_default_qconfig("fbgemm") @@ -62,11 +67,11 @@ def quantize_model(model, inp): script_module, input_shapes, keep_quantized_weight=True ) - with open("resnet50_int8.json", "w") as fo: - fo.write(tvm.ir.save_json(mod)) + with open("resnet50_int8.json", "w") as file: + file.write(tvm.ir.save_json(mod)) - with open("resnet50_int8.params", "wb") as fo: - fo.write(relay.save_param_dict(params)) + with open("resnet50_int8.params", "wb") as file: + file.write(relay.save_param_dict(params)) if __name__ == "__main__": diff --git a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py index 84a33b9c80d3..117e9d4b6f19 100644 --- a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py +++ b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py @@ -14,10 +14,12 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
+"""Test Resnet50 float16 with MetaSchedule""" + import os -import pytest import tempfile +import pytest import numpy as np import tvm.testing @@ -29,12 +31,6 @@ from ..infrastructure import get_hexagon_target -target = get_hexagon_target("v69") -target_llvm = tvm.target.Target("llvm") -model_json = "resnet50_fp16.json" -model_params = "resnet50_fp16.params" - - def convert_conv2d_layout(mod, desired_layouts): with tvm.transform.PassContext(opt_level=3): seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)]) @@ -44,14 +40,20 @@ def convert_conv2d_layout(mod, desired_layouts): @pytest.mark.skip("End-to-end tuning is skipped on CI.") @tvm.testing.requires_hexagon def test_resnet50(hexagon_launcher): + """Test Resnet50.""" + model_json = "resnet50_fp16.json" + target_llvm = tvm.target.Target("llvm") + target_hexagon = get_hexagon_target("v69") + model_params = "resnet50_fp16.params" + if not os.path.exists(model_json): pytest.skip(msg="Run python export_models.py first.") - with open(model_json, "r") as fi: - mod = tvm.ir.load_json(fi.read()) + with open(model_json, "r") as file: + mod = tvm.ir.load_json(file.read()) - with open(model_params, "rb") as fi: - params = relay.load_param_dict(fi.read()) + with open(model_params, "rb") as file: + params = relay.load_param_dict(file.read()) mod = convert_conv2d_layout(mod, {"nn.conv2d": ["NHWC", "HWIO"]}) @@ -66,7 +68,7 @@ def test_resnet50(hexagon_launcher): with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, - target=target, + target=target_hexagon, params=params, work_dir=work_dir, # for faster tuning @@ -88,7 +90,7 @@ def test_resnet50(hexagon_launcher): hexagon_lowered = ms.relay_integration.compile_relay( database=database, mod=mod, - target=target, + target=target_hexagon, params=params, ) @@ -127,3 +129,7 @@ def test_resnet50(hexagon_launcher): hexagon_lowered.get_graph_json(), hexagon_lowered.lib ) print(debug_ex.profile(input_name=inp.copy())) + + +if __name__ == "__main__": + tvm.testing.main() diff --git a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py index e7400aee61f6..6970b0ac06b5 100644 --- a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py +++ b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py @@ -14,12 +14,15 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
+"""Test Resnet50 int8 with MetaSchedule""" + import os -import numpy as np -import pytest import tempfile from typing import Optional +import numpy as np +import pytest + import tvm import tvm.testing from tvm import relay @@ -31,15 +34,15 @@ from tvm.tir.schedule import BlockRV, Schedule from ..infrastructure import get_hexagon_target - -executor = relay.backend.Executor("graph", {"link-params": True}) -target = get_hexagon_target("v68") -target_llvm = tvm.target.Target("llvm") -model_json = "resnet50_int8.json" -model_params = "resnet50_int8.params" +MODEL_JSON = "resnet50_int8.json" +EXECUTOR = relay.backend.Executor("graph", {"link-params": True}) +TARGET_LLVM = tvm.target.Target("llvm") +TARGET_HEXAGON = get_hexagon_target("v68") +MODEL_PARAMS = "resnet50_int8.params" def tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher): + """Tune VRMPY with auto tensorization.""" sch_rules = [ schedule_rule.AutoInline( into_producer=False, @@ -95,12 +98,12 @@ def tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher): # This line is necessary for link-params to take effect during # task extraction and relay.build(...). - mod = mod.with_attr("executor", executor) + mod = mod.with_attr("executor", EXECUTOR) with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, - target=target, + target=TARGET_HEXAGON, params=params, work_dir=work_dir, # for faster tuning @@ -129,7 +132,7 @@ def tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher): return ms.relay_integration.compile_relay( database=database, mod=mod, - target=target, + target=TARGET_HEXAGON, params=params, ) @@ -137,14 +140,15 @@ def tune_vrmpy_auto_tensorize(mod, params, hexagon_launcher): @pytest.mark.skip("End-to-end tuning is skipped on CI.") @tvm.testing.requires_hexagon def test_resnet50(hexagon_launcher): - if not os.path.exists(model_json): + """Test Resnet50.""" + if not os.path.exists(MODEL_JSON): pytest.skip(msg="Run python export_models.py first.") - with open(model_json, "r") as fi: - mod = tvm.ir.load_json(fi.read()) + with open(MODEL_JSON, "r") as file: + mod = tvm.ir.load_json(file.read()) - with open(model_params, "rb") as fi: - params = relay.load_param_dict(fi.read()) + with open(MODEL_PARAMS, "rb") as file: + params = relay.load_param_dict(file.read()) inp = np.random.randn(1, 3, 224, 224).astype("float32") input_name = "image" @@ -156,15 +160,15 @@ def test_resnet50(hexagon_launcher): with tvm.transform.PassContext(opt_level=3): hexagon_lowered = relay.build( mod, - tvm.target.Target(target, host=target), + tvm.target.Target(TARGET_HEXAGON, host=TARGET_HEXAGON), params=params, - executor=executor, + executor=EXECUTOR, ) with tvm.transform.PassContext(opt_level=3): llvm_lowered = tvm.relay.build( mod, - tvm.target.Target(target_llvm, host=target_llvm), + tvm.target.Target(TARGET_LLVM, host=TARGET_LLVM), params=params, ) @@ -191,16 +195,16 @@ def test_resnet50(hexagon_launcher): print(debug_ex.profile(input_name=inp.copy())) -def _schedule_packed_8x8x32_conv2d(do_tune: bool): +def _schedule_packed_8x8x32_conv2d(): """Manually schedule a conv2d block, created from TE compute op via CreatePrimFunc, using 8x8x32 packed layout. 
""" def schedule_fn(sch, conv2d_block: Optional[BlockRV] = None) -> bool: - if conv2d_block == None: + if conv2d_block is None: try: conv2d_block = sch.get_block("conv2d_NCHWc_int8") - except: + except ValueError: return False assert "conv2d_NCHWc_int8" in sch.get(conv2d_block).annotations["schedule_rule"] @@ -234,13 +238,13 @@ def schedule_fn(sch, conv2d_block: Optional[BlockRV] = None) -> bool: # be desirable to do this with coarser spatial granularity sch.compute_at(conv2d_block, loops[4]) - def index_map_nchw32c_nchw8h8w32c(n, c, h, w, c32): - return [n, c, h // 8, w // 8, h % 8, w % 8, c32] + def index_map_nchw32c_nchw8h8w32c(n_batch, channel, height, width, channel_32): + return [n_batch, channel, height // 8, width // 8, height % 8, width % 8, channel_32] # Add cache for input and output activation layout transform, # note that weight is already in correct layout - input_cache = sch.cache_read(conv2d_block, 0, "global") - output_cache = sch.cache_write(outer_block, 0, "global") + input_cache = sch.cache_read(conv2d_block, 0, "global") # pylint: disable=unused-variable + output_cache = sch.cache_write(outer_block, 0, "global") # pylint: disable=unused-variable # Transform the layout of the input sch.transform_layout( conv2d_block, ("read", 0), index_map=index_map_nchw32c_nchw8h8w32c, pad_value=0 @@ -259,23 +263,25 @@ def index_map_nchw32c_nchw8h8w32c(n, c, h, w, c32): def tune_packed_8x8x32_template(mod, params, hexagon_launcher): + """Generate packed 8*8*32 template.""" + def schedule_rule_conv2d_packed_8x8x32(sch: Schedule, conv2d_block: BlockRV): - _schedule_packed_8x8x32_conv2d(do_tune=True)(sch, conv2d_block) + _schedule_packed_8x8x32_conv2d()(sch, conv2d_block) return [sch] register_func("meta_schedule.conv2d_NCHWc_int8", schedule_rule_conv2d_packed_8x8x32) def schedule_conv2d_for_tune(sch: Schedule): - _schedule_packed_8x8x32_conv2d(do_tune=True)(sch) + _schedule_packed_8x8x32_conv2d()(sch) # This line is necessary for link-params to take effect during # task extraction and relay.build(...). 
- mod = mod.with_attr("executor", executor) + mod = mod.with_attr("executor", EXECUTOR) with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, - target=target, + target=TARGET_HEXAGON, params=params, work_dir=work_dir, max_trials_global=20000, @@ -309,7 +315,7 @@ def schedule_conv2d_for_tune(sch: Schedule): return ms.relay_integration.compile_relay( database=database, mod=mod, - target=target, + target=TARGET_HEXAGON, params=params, ) @@ -317,14 +323,15 @@ def schedule_conv2d_for_tune(sch: Schedule): @pytest.mark.skip("End-to-end tuning is skipped on CI.") @tvm.testing.requires_hexagon def test_packed_8x8x32_resnet50(hexagon_launcher): - if not os.path.exists(model_json): + """Test packed 8*8*32 Resnet50""" + if not os.path.exists(MODEL_JSON): pytest.skip(msg="Run python export_models.py first.") - with open(model_json, "r") as fi: - mod = tvm.ir.load_json(fi.read()) + with open(MODEL_JSON, "r") as file: + mod = tvm.ir.load_json(file.read()) - with open(model_params, "rb") as fi: - params = relay.load_param_dict(fi.read()) + with open(MODEL_PARAMS, "rb") as file: + params = relay.load_param_dict(file.read()) inp = np.random.randn(1, 3, 224, 224).astype("float32") input_name = "image" @@ -336,15 +343,15 @@ def test_packed_8x8x32_resnet50(hexagon_launcher): with tvm.transform.PassContext(opt_level=3): hexagon_lowered = relay.build( mod, - tvm.target.Target(target, host=target), + tvm.target.Target(TARGET_HEXAGON, host=TARGET_HEXAGON), params=params, - executor=executor, + executor=EXECUTOR, ) with tvm.transform.PassContext(opt_level=3): llvm_lowered = tvm.relay.build( mod, - tvm.target.Target(target_llvm, host=target_llvm), + tvm.target.Target(TARGET_LLVM, host=TARGET_LLVM), params=params, ) @@ -360,3 +367,7 @@ def test_packed_8x8x32_resnet50(hexagon_launcher): ref_result = llvm_graph_mod.get_output(0).numpy() np.testing.assert_allclose(ref_result, hexagon_output, atol=1e-4, rtol=1e-5) + + +if __name__ == "__main__": + tvm.testing.main() diff --git a/tests/python/contrib/test_hexagon/topi/__init__.py b/tests/python/contrib/test_hexagon/topi/__init__.py index fb6657b09e51..dce5413e66e2 100644 --- a/tests/python/contrib/test_hexagon/topi/__init__.py +++ b/tests/python/contrib/test_hexagon/topi/__init__.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. -""" Testing infrastructure for Hexagon/TOPI """ +""" Hexagon TOPI tests """ diff --git a/tests/python/contrib/test_hexagon/topi/slice_op/__init__.py b/tests/python/contrib/test_hexagon/topi/slice_op/__init__.py new file mode 100644 index 000000000000..baf28ad93323 --- /dev/null +++ b/tests/python/contrib/test_hexagon/topi/slice_op/__init__.py @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +""" Hexagon TOPI Slice OP tests """ diff --git a/tests/python/contrib/test_hexagon/topi/test_argmax_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_argmax_slice.py similarity index 97% rename from tests/python/contrib/test_hexagon/topi/test_argmax_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_argmax_slice.py index 5ed86a1fcc92..5f4a594fcfb1 100644 --- a/tests/python/contrib/test_hexagon/topi/test_argmax_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_argmax_slice.py @@ -22,7 +22,7 @@ from tvm import te import tvm.topi.hexagon.slice_ops as sl import tvm.contrib.hexagon -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target class TestArgMaxSlice: diff --git a/tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_avg_pool2d_slice.py similarity index 99% rename from tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_avg_pool2d_slice.py index 6f6a7d762747..13876da87295 100644 --- a/tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_avg_pool2d_slice.py @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. -import pytest import numpy as np from typing import * @@ -24,13 +23,13 @@ from tvm.contrib.hexagon.session import Session import tvm.topi.hexagon.slice_ops as sl import tvm.topi.hexagon.qnn as qn -from ..infrastructure import ( +from ...infrastructure import ( allocate_hexagon_array, transform_numpy, quantize_np, get_hexagon_target, ) -from ..pytest_util import ( +from ...pytest_util import ( get_multitest_ids, create_populated_numpy_ndarray, TensorContentRandom, diff --git a/tests/python/contrib/test_hexagon/topi/test_cast_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py similarity index 98% rename from tests/python/contrib/test_hexagon/topi/test_cast_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py index 7f59e3ffa7fd..3118c7be8efb 100644 --- a/tests/python/contrib/test_hexagon/topi/test_cast_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py @@ -23,7 +23,7 @@ from tvm import te import tvm.topi.hexagon.slice_ops as sl -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target class TestCastF16F32Slice2d: diff --git a/tests/python/contrib/test_hexagon/topi/test_clip.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_clip_slice.py old mode 100755 new mode 100644 similarity index 97% rename from tests/python/contrib/test_hexagon/topi/test_clip.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_clip_slice.py index 3f8f5077c758..e0a2e20a0b6b --- a/tests/python/contrib/test_hexagon/topi/test_clip.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_clip_slice.py @@ -20,10 +20,10 @@ import numpy as np from tvm import te - import tvm.testing import tvm.topi.hexagon.slice_ops as sl -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target + +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target input_layout = tvm.testing.parameter( 
"nhwc-8h2w32c2w-2d", diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_conv2d_slice.py old mode 100755 new mode 100644 similarity index 99% rename from tests/python/contrib/test_hexagon/topi/test_conv2d_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_conv2d_slice.py index 242265169fb8..c314e9655c9a --- a/tests/python/contrib/test_hexagon/topi/test_conv2d_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_conv2d_slice.py @@ -25,7 +25,7 @@ from tvm.topi.hexagon.slice_ops.conv2d import conv2d_compute, conv2d_schedule from tvm.topi.testing import conv2d_nhwc_python -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target input_layout = tvm.testing.parameter( "nhwc-8h2w32c2w-2d", diff --git a/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_depthwise_conv2d_slice.py similarity index 99% rename from tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_depthwise_conv2d_slice.py index 840a462917ae..74e4d05446ed 100644 --- a/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_depthwise_conv2d_slice.py @@ -19,12 +19,14 @@ """Test depthwise_conv2d slice op for hexagon.""" import numpy as np + import tvm import tvm.testing import tvm.topi.hexagon.qnn as qn from tvm.topi.testing import depthwise_conv2d_python_nhwc from tvm.topi.hexagon.slice_ops.dwconv2d import dwconv2d_compute, dwconv2d_schedule -from ..infrastructure import allocate_hexagon_array, transform_numpy, quantize_np + +from ...infrastructure import allocate_hexagon_array, transform_numpy, quantize_np @tvm.testing.fixture diff --git a/tests/python/contrib/test_hexagon/topi/test_dequantize_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_dequantize_slice.py similarity index 99% rename from tests/python/contrib/test_hexagon/topi/test_dequantize_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_dequantize_slice.py index 6ed217180aba..9b1c5bc5f614 100644 --- a/tests/python/contrib/test_hexagon/topi/test_dequantize_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_dequantize_slice.py @@ -23,7 +23,7 @@ import tvm.testing from tvm import te from tvm.topi.hexagon import qnn -from ..infrastructure import ( +from ...infrastructure import ( allocate_hexagon_array, transform_numpy, quantize_np, diff --git a/tests/python/contrib/test_hexagon/topi/test_max_pool2d_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_max_pool2d_slice.py similarity index 98% rename from tests/python/contrib/test_hexagon/topi/test_max_pool2d_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_max_pool2d_slice.py index f2ee76863cb6..fcb4411609b2 100644 --- a/tests/python/contrib/test_hexagon/topi/test_max_pool2d_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_max_pool2d_slice.py @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. 
-import pytest import numpy as np from typing import * @@ -23,8 +22,9 @@ import tvm.testing from tvm.contrib.hexagon.session import Session import tvm.topi.hexagon.slice_ops as sl -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target -from ..pytest_util import ( + +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +from ...pytest_util import ( get_multitest_ids, create_populated_numpy_ndarray, TensorContentRandom, diff --git a/tests/python/contrib/test_hexagon/topi/test_relu_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_relu_slice.py similarity index 97% rename from tests/python/contrib/test_hexagon/topi/test_relu_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_relu_slice.py index fd04cca061da..93a8d77827bf 100644 --- a/tests/python/contrib/test_hexagon/topi/test_relu_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_relu_slice.py @@ -16,14 +16,13 @@ # under the License. import numpy as np -import pytest import tvm import tvm.testing from tvm.topi.hexagon.slice_ops.relu import relu_compute, relu_stir_schedule from tvm import te -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target @tvm.testing.fixture diff --git a/tests/python/contrib/test_hexagon/topi/test_softmax_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_softmax_slice.py similarity index 98% rename from tests/python/contrib/test_hexagon/topi/test_softmax_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_softmax_slice.py index 1329fda7aa4a..a3db1b6dcdbe 100644 --- a/tests/python/contrib/test_hexagon/topi/test_softmax_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_softmax_slice.py @@ -21,7 +21,7 @@ from tvm.topi.testing import softmax_python import tvm.topi.hexagon.slice_ops as sl -from ..infrastructure import allocate_hexagon_array, get_hexagon_target +from ...infrastructure import allocate_hexagon_array def transform_numpy(arr_np, layout): diff --git a/tests/python/contrib/test_hexagon/topi/test_tanh_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_tanh_slice.py similarity index 97% rename from tests/python/contrib/test_hexagon/topi/test_tanh_slice.py rename to tests/python/contrib/test_hexagon/topi/slice_op/test_tanh_slice.py index 02c587b9809c..f8c14ef934a1 100644 --- a/tests/python/contrib/test_hexagon/topi/test_tanh_slice.py +++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_tanh_slice.py @@ -16,14 +16,13 @@ # under the License. """ Test for Hexagon slice tanh op """ import numpy as np -import pytest import tvm import tvm.testing from tvm import te import tvm.topi.hexagon.slice_ops as sl import tvm.contrib.hexagon -from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +from ...infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target # pylint: disable=invalid-name diff --git a/tests/python/contrib/test_hexagon/topi/test_add_subtract_multiply.py b/tests/python/contrib/test_hexagon/topi/test_add_subtract_multiply.py old mode 100755 new mode 100644 index 711d725e842f..d689888d6e85 --- a/tests/python/contrib/test_hexagon/topi/test_add_subtract_multiply.py +++ b/tests/python/contrib/test_hexagon/topi/test_add_subtract_multiply.py @@ -14,9 +14,7 @@ # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. - - -import pytest +"""Test code for Add, Subtract and Multiply.""" import numpy as np import tvm @@ -30,78 +28,14 @@ get_hexagon_target, ) +ZERO_POINT_A_VAL = None +SCALE_A_VAL = None -@tvm.testing.fixture -def expected_output_np(input_np_A, input_np_B, op_name): - if op_name == "add": - out_ref = np.add(input_np_A, input_np_B) - elif op_name == "subtract": - out_ref = np.subtract(input_np_A, input_np_B) - elif op_name == "multiply": - out_ref = np.multiply(input_np_A, input_np_B) - return out_ref - - -@tvm.testing.fixture -def input_np_A(input_shape_A, dtype): - if dtype == "uint8" or dtype == "int8": - dtype = "float32" - return np.random.random(input_shape_A).astype(dtype) - - -@tvm.testing.fixture -def input_np_B(input_shape_B, dtype): - if dtype == "uint8" or dtype == "int8": - dtype = "float32" - return np.random.random(input_shape_B).astype(dtype) - - -@tvm.testing.fixture -def quantize_input_np_A(input_np_A, dtype): - if dtype == "uint8" or dtype == "int8": - global zero_point_A_val, scale_A_val - input_np_A_quantized, scale_A_val, zero_point_A_val = quantize_np(input_np_A, dtype) - return input_np_A_quantized - - -@tvm.testing.fixture -def quantize_input_np_B(input_np_B, dtype): - if dtype == "uint8" or dtype == "int8": - global zero_point_B_val, scale_B_val - input_np_B_quantized, scale_B_val, zero_point_B_val = quantize_np(input_np_B, dtype) - return input_np_B_quantized - - -@tvm.testing.fixture -def transformed_input_np_A(input_np_A, quantize_input_np_A, input_A_layout, dtype): - if dtype == "float16": - return transform_numpy(input_np_A, "nhwc", input_A_layout) - if dtype == "uint8" or dtype == "int8": - return transform_numpy(quantize_input_np_A, "nhwc", input_A_layout) - - raise RuntimeError(f"Unsupported data type '{dtype}'") - - -@tvm.testing.fixture -def transformed_input_np_B(input_np_B, quantize_input_np_B, input_B_layout, dtype): - if dtype == "float16": - return transform_numpy(input_np_B, "nhwc", input_B_layout) - if dtype == "uint8" or dtype == "int8": - return transform_numpy(quantize_input_np_B, "nhwc", input_B_layout) +ZERO_POINT_B_VAL = None +SCALE_B_VAL = None - raise RuntimeError(f"Unsupported data type '{dtype}'") - - -@tvm.testing.fixture -def transformed_expected_output_np(expected_output_np, output_layout, dtype): - if dtype == "float16": - return transform_numpy(expected_output_np, "nhwc", output_layout) - if dtype == "uint8" or dtype == "int8": - global zero_point_M_val, scale_M_val - out_ref_quantized, scale_M_val, zero_point_M_val = quantize_np(expected_output_np, dtype) - return transform_numpy(out_ref_quantized, "nhwc", output_layout) - - raise RuntimeError(f"Unsupported data type '{dtype}'") +ZERO_POINT_M_VAL = None +SCALE_M_VAL = None def hexagon_wrapper_allocation( @@ -114,7 +48,7 @@ def hexagon_wrapper_allocation( dtype=None, ): """Input layout can either be nhwc-8h2w32c2w-2d or nhwc""" - if layout == "nhwc-8h2w32c2w-2d" or layout == "nhwc-8h8w32c-2d": + if layout in ["nhwc-8h2w32c2w-2d", "nhwc-8h8w32c-2d"]: data_nd = allocate_hexagon_array( device, tensor_shape=tensor_shape, @@ -132,11 +66,13 @@ def hexagon_wrapper_allocation( class TestAddSubtractMultiplyBroadcast2d: + """Test Add, Subtract and Multiply class.""" + ( - input_shape_A, - input_shape_B, - input_A_layout, - input_B_layout, + input_shape_a, + input_shape_b, + input_a_layout, + input_b_layout, output_layout, dtype, ) = tvm.testing.parameters( @@ -269,60 +205,134 @@ class 
TestAddSubtractMultiplyBroadcast2d: op_name = tvm.testing.parameter("add", "subtract", "multiply") + @tvm.testing.fixture + def expected_output_np(self, input_np_a, input_np_b, op_name): + """Generate expected output.""" + if op_name == "add": + out_ref = np.add(input_np_a, input_np_b) + elif op_name == "subtract": + out_ref = np.subtract(input_np_a, input_np_b) + elif op_name == "multiply": + out_ref = np.multiply(input_np_a, input_np_b) + return out_ref + + @tvm.testing.fixture + def transformed_expected_output_np(self, expected_output_np, output_layout, dtype): + """Generate expected output.""" + if dtype == "float16": + return transform_numpy(expected_output_np, "nhwc", output_layout) + if dtype in ["uint8", "int8"]: + global ZERO_POINT_M_VAL, SCALE_M_VAL + out_ref_quantized, SCALE_M_VAL, ZERO_POINT_M_VAL = quantize_np( + expected_output_np, dtype + ) + return transform_numpy(out_ref_quantized, "nhwc", output_layout) + + raise RuntimeError(f"Unsupported data type '{dtype}'") + + @tvm.testing.fixture + def input_np_a(self, input_shape_a, dtype): + """Generate numpy input for variable a.""" + if dtype in ["uint8", "int8"]: + dtype = "float32" + return np.random.random(input_shape_a).astype(dtype) + + @tvm.testing.fixture + def input_np_b(self, input_shape_b, dtype): + """Generate numpy input for variable b.""" + if dtype in ["uint8", "int8"]: + dtype = "float32" + return np.random.random(input_shape_b).astype(dtype) + + @tvm.testing.fixture + def quantize_input_np_a(self, input_np_a, dtype): + if dtype in ["uint8", "int8"]: + global ZERO_POINT_A_VAL, SCALE_A_VAL + input_np_a_quantized, SCALE_A_VAL, ZERO_POINT_A_VAL = quantize_np(input_np_a, dtype) + return input_np_a_quantized + return None + + @tvm.testing.fixture + def quantize_input_np_b(self, input_np_b, dtype): + if dtype in ["uint8", "int8"]: + global ZERO_POINT_B_VAL, SCALE_B_VAL + input_np_b_quantized, SCALE_B_VAL, ZERO_POINT_B_VAL = quantize_np(input_np_b, dtype) + return input_np_b_quantized + return None + + @tvm.testing.fixture + def transformed_input_np_a(self, input_np_a, quantize_input_np_a, input_a_layout, dtype): + if dtype == "float16": + return transform_numpy(input_np_a, "nhwc", input_a_layout) + if dtype in ["uint8", "int8"]: + return transform_numpy(quantize_input_np_a, "nhwc", input_a_layout) + + raise RuntimeError(f"Unsupported data type '{dtype}'") + + @tvm.testing.fixture + def transformed_input_np_b(self, input_np_b, quantize_input_np_b, input_b_layout, dtype): + if dtype == "float16": + return transform_numpy(input_np_b, "nhwc", input_b_layout) + if dtype in ["uint8", "int8"]: + return transform_numpy(quantize_input_np_b, "nhwc", input_b_layout) + + raise RuntimeError(f"Unsupported data type '{dtype}'") + @tvm.testing.requires_hexagon def test_transform( self, dtype, - input_shape_A, - input_shape_B, - input_np_A, - input_np_B, - quantize_input_np_A, - quantize_input_np_B, - transformed_input_np_A, - transformed_input_np_B, + input_shape_a, + input_shape_b, + input_np_a, + input_np_b, + quantize_input_np_a, + quantize_input_np_b, + transformed_input_np_a, + transformed_input_np_b, expected_output_np, transformed_expected_output_np, hexagon_session, output_layout, - input_A_layout, - input_B_layout, + input_a_layout, + input_b_layout, op_name, ): + """Test transform.""" output_shape = expected_output_np.shape - A = te.placeholder(input_shape_A, name="A", dtype=dtype) - B = te.placeholder(input_shape_B, name="B", dtype=dtype) + a_tensor = te.placeholder(input_shape_a, name="a_tensor", dtype=dtype) + b_tensor = 
te.placeholder(input_shape_b, name="b_tensor", dtype=dtype) if dtype == "float16": if op_name == "add": - M = sl.add_broadcast_compute(A, B) + m_tensor = sl.add_broadcast_compute(a_tensor, b_tensor) elif op_name == "subtract": - M = sl.subtract_broadcast_compute(A, B) + m_tensor = sl.subtract_broadcast_compute(a_tensor, b_tensor) elif op_name == "multiply": - M = sl.multiply_broadcast_compute(A, B) + m_tensor = sl.multiply_broadcast_compute(a_tensor, b_tensor) tir_schedule = sl.tir_broadcast_schedule( - M, A, B, output_layout, input_A_layout, input_B_layout, op_name + m_tensor, a_tensor, b_tensor, output_layout, input_a_layout, input_b_layout, op_name ) - elif dtype == "uint8" or dtype == "int8": + elif dtype in ["uint8", "int8"]: args = [ - A, - B, + a_tensor, + b_tensor, output_shape, - zero_point_A_val, - scale_A_val, - zero_point_B_val, - scale_B_val, - zero_point_M_val, - scale_M_val, + ZERO_POINT_A_VAL, + SCALE_A_VAL, + ZERO_POINT_B_VAL, + SCALE_B_VAL, + ZERO_POINT_M_VAL, + SCALE_M_VAL, dtype, ] if op_name == "add": - M = qn.qadd_broadcast_compute(*args) + m_tensor = qn.qadd_broadcast_compute(*args) elif op_name == "subtract": - M = qn.qsubtract_broadcast_compute(*args) + m_tensor = qn.qsubtract_broadcast_compute(*args) elif op_name == "multiply": - M = qn.qmultiply_broadcast_compute(*args) + m_tensor = qn.qmultiply_broadcast_compute(*args) tir_schedule = qn.tir_schedule_quant( - M, A, B, output_layout, input_A_layout, input_B_layout + m_tensor, a_tensor, b_tensor, output_layout, input_a_layout, input_b_layout ) sch = tir_schedule.mod @@ -339,35 +349,35 @@ def test_transform( with tvm.transform.PassContext(opt_level=3): func = tvm.build( sch, - [A, B, M], + [a_tensor, b_tensor, m_tensor], get_hexagon_target("v69"), name="slice_op_with_transform", ) if dtype == "float16": - in_data_np_A = input_np_A - in_data_np_B = input_np_B - elif dtype == "int8" or dtype == "uint8": - in_data_np_A = quantize_input_np_A - in_data_np_B = quantize_input_np_B + in_data_np_a = input_np_a + in_data_np_b = input_np_b + elif dtype in ["int8", "uint8"]: + in_data_np_a = quantize_input_np_a + in_data_np_b = quantize_input_np_b else: raise RuntimeError(f"Unsupport dtype '{dtype}'") - A_data_nd = hexagon_wrapper_allocation( + a_data_nd = hexagon_wrapper_allocation( hexagon_session.device, - layout=input_A_layout, - data_original=in_data_np_A, - transformed_data=transformed_input_np_A, + layout=input_a_layout, + data_original=in_data_np_a, + transformed_data=transformed_input_np_a, axis_separators=input_axis_separator, ) - B_data_nd = hexagon_wrapper_allocation( + b_data_nd = hexagon_wrapper_allocation( hexagon_session.device, - layout=input_B_layout, - data_original=in_data_np_B, - transformed_data=transformed_input_np_B, + layout=input_b_layout, + data_original=in_data_np_b, + transformed_data=transformed_input_np_b, axis_separators=input_axis_separator, ) - M_data_nd = hexagon_wrapper_allocation( + m_data_nd = hexagon_wrapper_allocation( hexagon_session.device, layout=output_layout, tensor_shape=transformed_expected_output_np.shape, @@ -376,21 +386,25 @@ def test_transform( ) mod = hexagon_session.load_module(func) - mod(A_data_nd, B_data_nd, M_data_nd) + mod(a_data_nd, b_data_nd, m_data_nd) - b, h, w, c = output_shape + batch, height, width, channel = output_shape # convert nd to np and reshape to fixed chunk size layout if output_layout == "nhwc-8h2w32c2w-2d": - M_data_np = M_data_nd.numpy().reshape([b, h // 8, w // 4, c // 32, 8, 2, 32, 2]) + m_data_np = m_data_nd.numpy().reshape( + [batch, height // 8, 
width // 4, channel // 32, 8, 2, 32, 2] + ) elif output_layout == "nhwc-8h8w32c-2d": - M_data_np = M_data_nd.numpy().reshape([b, h // 8, w // 8, c // 32, 8, 8, 32]) + m_data_np = m_data_nd.numpy().reshape( + [batch, height // 8, width // 8, channel // 32, 8, 8, 32] + ) if dtype == "float16": np.testing.assert_allclose( - transformed_expected_output_np, M_data_np, rtol=1e-3, atol=1e-3 + transformed_expected_output_np, m_data_np, rtol=1e-3, atol=1e-3 ) - elif dtype == "int8" or dtype == "uint8": - np.testing.assert_allclose(transformed_expected_output_np, M_data_np, rtol=1, atol=1) + elif dtype in ["int8", "uint8"]: + np.testing.assert_allclose(transformed_expected_output_np, m_data_np, rtol=1, atol=1) if __name__ == "__main__": diff --git a/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py b/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py index f3273ea8b65b..22fd96254ca7 100644 --- a/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py +++ b/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py @@ -25,18 +25,14 @@ from tvm.contrib.hexagon.session import Session import tvm.topi.testing from tvm.topi.utils import get_const_tuple -from tvm.contrib.hexagon.session import Session from ..infrastructure import get_hexagon_target -dtype = tvm.testing.parameter( - "float32", - "float16", -) - class TestMatMulFloat: - x_batch, y_batch, M, N, K = tvm.testing.parameters( + """Test MatMul Float class.""" + + x_batch, y_batch, m_size, n_size, k_size = tvm.testing.parameters( (1, 1, 16, 16, 32), (5, 5, 16, 16, 32), (5, 5, 16, 20, 32), @@ -46,18 +42,26 @@ class TestMatMulFloat: (5, 1, 16, 16, 32), ) + dtype = tvm.testing.parameter( + "float32", + "float16", + ) + # TODO(mehrdadh): add dynamic testing @tvm.testing.requires_hexagon - def test_batch_matmul(self, hexagon_session: Session, x_batch, y_batch, M, N, K, dtype): + def test_batch_matmul( + self, hexagon_session: Session, x_batch, y_batch, m_size, n_size, k_size, dtype + ): + """Test batch MatMul.""" if dtype == "float16": pytest.xfail("float16 is not supported.") - x = te.placeholder((x_batch, M, K), name="x") - y = te.placeholder((y_batch, N, K), name="y") + x = te.placeholder((x_batch, m_size, k_size), name="x") + y = te.placeholder((y_batch, n_size, k_size), name="y") def get_ref_data(): - a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype) - b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype) + a_np = np.random.uniform(size=(x_batch, m_size, k_size)).astype(dtype) + b_np = np.random.uniform(size=(y_batch, n_size, k_size)).astype(dtype) c_np = tvm.topi.testing.batch_matmul(a_np, b_np) return (a_np, b_np, c_np) @@ -89,7 +93,9 @@ def get_ref_data(): class TestMatMulInt8: - x_batch, y_batch, M, N, K = tvm.testing.parameters( + """Test MatMul INT8 class.""" + + x_batch, y_batch, m_size, n_size, k_size = tvm.testing.parameters( (1, 1, 2, 3, 1), (1, 1, 16, 24, 32), (5, 5, 24, 16, 32), @@ -98,17 +104,29 @@ class TestMatMulInt8: (5, 1, 16, 16, 32), ) + dtype = tvm.testing.parameter( + "float32", + "float16", + ) + @tvm.testing.requires_hexagon - def test_batch_matmul_int8(self, hexagon_session: Session, x_batch, y_batch, M, N, K): + def test_batch_matmul_int8( + self, hexagon_session: Session, x_batch, y_batch, m_size, n_size, k_size + ): + """Test batch matmul INT8.""" dtype = "int8" out_dtype = "int8" assert x_batch == y_batch or x_batch == 1 or y_batch == 1 - x = te.placeholder((x_batch, M, K), name="x", dtype=dtype) - y = te.placeholder((y_batch, N, K), name="y", dtype=dtype) + x = 
te.placeholder((x_batch, m_size, k_size), name="x", dtype=dtype) + y = te.placeholder((y_batch, n_size, k_size), name="y", dtype=dtype) def get_ref_data(): - a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype) - b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype) + a_np = np.random.randint(low=-128, high=127, size=(x_batch, m_size, k_size)).astype( + dtype + ) + b_np = np.random.randint(low=-128, high=127, size=(y_batch, n_size, k_size)).astype( + dtype + ) c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype=out_dtype) return (a_np, b_np, c_np) diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py index 5066a532df9b..41fe310d8484 100644 --- a/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py +++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py @@ -76,105 +76,6 @@ def build_conv2d(target): return module -shape_parameters = [ - ( - (1, 8, 4, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 10, 14, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 14, 6, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 14, 6, 3), - (3, 3, 3, 64), - (1, 1), - ), - ( - (1, 14, 6, 3), - (5, 5, 3, 3), - (1, 1), - ), - ( - (1, 8, 8, 3), - (2, 2, 3, 3), - (1, 1), - ), - ( - (1, 14, 6, 64), - (3, 3, 64, 3), - (1, 1), - ), - ( - (1, 4, 4, 40), - (3, 3, 40, 3), - (1, 1), - ), - ( - (1, 4, 4, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 5, 5, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 6, 6, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 7, 7, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 8, 8, 3), - (3, 3, 3, 3), - (1, 1), - ), - ( - (1, 8, 8, 3), - (5, 5, 3, 3), - (1, 1), - ), - ( - (1, 8, 8, 64), - (2, 2, 64, 64), - (1, 1), - ), - ( - (1, 8, 4, 3), - (3, 3, 3, 3), - (2, 2), - ), - ( - (1, 14, 6, 3), - (3, 3, 3, 64), - (2, 2), - ), - ( - (1, 14, 6, 3), - (5, 5, 3, 3), - (2, 2), - ), - ( - (1, 8, 8, 3), - (2, 2, 3, 3), - (2, 2), - ), -] - - def gen_config(params): """Utility function to generate useful ids for shape_parameters""" @@ -192,6 +93,104 @@ def gen_config(params): class TestConv2dIntrin: """Test Conv2d Intrin class""" + shape_parameters = [ + ( + (1, 8, 4, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 10, 14, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 14, 6, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 14, 6, 3), + (3, 3, 3, 64), + (1, 1), + ), + ( + (1, 14, 6, 3), + (5, 5, 3, 3), + (1, 1), + ), + ( + (1, 8, 8, 3), + (2, 2, 3, 3), + (1, 1), + ), + ( + (1, 14, 6, 64), + (3, 3, 64, 3), + (1, 1), + ), + ( + (1, 4, 4, 40), + (3, 3, 40, 3), + (1, 1), + ), + ( + (1, 4, 4, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 5, 5, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 6, 6, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 7, 7, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 8, 8, 3), + (3, 3, 3, 3), + (1, 1), + ), + ( + (1, 8, 8, 3), + (5, 5, 3, 3), + (1, 1), + ), + ( + (1, 8, 8, 64), + (2, 2, 64, 64), + (1, 1), + ), + ( + (1, 8, 4, 3), + (3, 3, 3, 3), + (2, 2), + ), + ( + (1, 14, 6, 3), + (3, 3, 3, 64), + (2, 2), + ), + ( + (1, 14, 6, 3), + (5, 5, 3, 3), + (2, 2), + ), + ( + (1, 8, 8, 3), + (2, 2, 3, 3), + (2, 2), + ), + ] + config = gen_config(shape_parameters) act_shape, wgt_shape, inp_stride = tvm.testing.parameters(*config.values(), ids=config.keys()) inp_offset = tvm.testing.parameter((0, 0), ids=["offset0x0"]) diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py 
b/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py index 0b94d6e781a7..9c89427e1b01 100644 --- a/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py +++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py @@ -28,66 +28,66 @@ from ..infrastructure import get_hexagon_target -dtype = tvm.testing.parameter("float32") -random_seed = tvm.testing.parameter(0) - - -@tvm.testing.fixture -def input_shape(batch, in_channel, in_size): - return (batch, in_channel, in_size, in_size) - - -@tvm.testing.fixture -def weight_shape(num_filter, in_channel, kernel): - return (num_filter, in_channel, kernel, kernel) - - -@tvm.testing.fixture -def bias_shape(num_filter): - return (num_filter, 1, 1) +class BaseConv2DTests: + """Conv2D test class.""" -@tvm.testing.fixture(cache_return_value=True) -def ref_data( - random_seed, - input_shape, - weight_shape, - bias_shape, - dtype, - stride, - padding, - dilation, - add_bias, - apply_relu, -): - np.random.seed(random_seed) + add_bias = tvm.testing.parameter(False) + apply_relu = tvm.testing.parameter(False) + dilation = tvm.testing.parameter(1) + batch = tvm.testing.parameter(1) + dtype = tvm.testing.parameter("float32") - # scipy.signal.convolve2d does not support float16 data types, and - # the python fallback is too slow for general use. Computing - # ref_data in float32 will have fewer rounding errors than the TVM - # float16 compute, but those vary based on schedule anyways. - conv_dtype = "float32" if dtype == "float16" else dtype + random_seed = tvm.testing.parameter(0) - a_np = np.random.uniform(size=input_shape).astype(dtype) - w_np = np.random.uniform(size=weight_shape).astype(dtype) - b_np = np.random.uniform(size=bias_shape).astype(dtype) - dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation)) - c_np = tvm.topi.testing.conv2d_nchw_python( - a_np.astype(conv_dtype), dw_np.astype(conv_dtype), stride, padding - ).astype(dtype) + @tvm.testing.fixture + def input_shape(self, batch, in_channel, in_size): + return (batch, in_channel, in_size, in_size) - if add_bias: - c_np = c_np + b_np - if apply_relu: - c_np = np.maximum(c_np, 0) - return a_np, w_np, b_np, c_np + @tvm.testing.fixture + def weight_shape(self, num_filter, in_channel, kernel): + return (num_filter, in_channel, kernel, kernel) + @tvm.testing.fixture + def bias_shape(self, num_filter): + return (num_filter, 1, 1) -class BaseConv2DTests: - add_bias = tvm.testing.parameter(False) - apply_relu = tvm.testing.parameter(False) - dilation = tvm.testing.parameter(1) - batch = tvm.testing.parameter(1) + @tvm.testing.fixture(cache_return_value=True) + def ref_data( + self, + random_seed, + input_shape, + weight_shape, + bias_shape, + dtype, + stride, + padding, + dilation, + add_bias, + apply_relu, + ): + """Generate reference data.""" + np.random.seed(random_seed) + + # scipy.signal.convolve2d does not support float16 data types, and + # the python fallback is too slow for general use. Computing + # ref_data in float32 will have fewer rounding errors than the TVM + # float16 compute, but those vary based on schedule anyways. 
+ conv_dtype = "float32" if dtype == "float16" else dtype + + a_np = np.random.uniform(size=input_shape).astype(dtype) + w_np = np.random.uniform(size=weight_shape).astype(dtype) + b_np = np.random.uniform(size=bias_shape).astype(dtype) + dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation)) + c_np = tvm.topi.testing.conv2d_nchw_python( + a_np.astype(conv_dtype), dw_np.astype(conv_dtype), stride, padding + ).astype(dtype) + + if add_bias: + c_np = c_np + b_np + if apply_relu: + c_np = np.maximum(c_np, 0) + return a_np, w_np, b_np, c_np @tvm.testing.requires_hexagon def test_conv2d_nchw( @@ -106,14 +106,15 @@ def test_conv2d_nchw( add_bias, apply_relu, ): + """Test Conv2d NCHW.""" pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel)) padding_sum = pad_top + pad_left + pad_bottom + pad_right a_np, w_np, b_np, c_np = ref_data - A = te.placeholder(a_np.shape, name="A", dtype=dtype) - W = te.placeholder(w_np.shape, name="W", dtype=dtype) + a_tensor = te.placeholder(a_np.shape, name="a_tensor", dtype=dtype) + w_tensor = te.placeholder(w_np.shape, name="w_tensor", dtype=dtype) bias = te.placeholder(b_np.shape, name="bias", dtype=dtype) if "int" in dtype: @@ -121,7 +122,7 @@ def test_conv2d_nchw( elif dtype == "float32": tol = {"rtol": 1e-4, "atol": 2e-4} elif dtype == "float16": - # A summation in float16 with a single accumulator very + # a_tensor summation in float16 with a single accumulator very # quickly runs into large rounding errors. At some point, # this tolerance should be schedule-dependent for to avoid # false negatives. @@ -132,12 +133,14 @@ def test_conv2d_nchw( with tvm.target.Target(get_hexagon_target("v68")): fcompute = topi.nn.conv2d_nchw fschedule = topi.hexagon.schedule_conv2d_nchw - C = fcompute(A, W, (stride, stride), padding, (dilation, dilation), dtype) + c_tensor = fcompute( + a_tensor, w_tensor, (stride, stride), padding, (dilation, dilation), dtype + ) if add_bias: - C = topi.add(C, bias) + c_tensor = topi.add(c_tensor, bias) if apply_relu: - C = topi.nn.relu(C) - s = fschedule([C]) + c_tensor = topi.nn.relu(c_tensor) + s = fschedule([c_tensor]) func_name = "conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format( dtype, @@ -152,19 +155,19 @@ def test_conv2d_nchw( ) func = tvm.build( s, - [A, W, bias, C], + [a_tensor, w_tensor, bias, c_tensor], get_hexagon_target("v68"), name=func_name, ) mod = hexagon_session.load_module(func) dev = hexagon_session.device - a = tvm.nd.array(a_np, dev) - w = tvm.nd.array(w_np, dev) + a_data = tvm.nd.array(a_np, dev) + weight = tvm.nd.array(w_np, dev) b = tvm.nd.array(b_np, dev) - c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev) - mod[func_name](a, w, b, c) + c = tvm.nd.array(np.zeros(get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev) + mod[func_name](a_data, weight, b, c) tvm.testing.assert_allclose(c.numpy(), c_np, **tol) diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py index 2068f1e6e6fc..9edc04db4398 100644 --- a/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py +++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py @@ -27,23 +27,27 @@ from ..infrastructure import get_hexagon_target -dtype = tvm.testing.parameter("float32") +class BaseConv2DTests: + """Test Conv2D base class.""" -@tvm.testing.fixture(cache_return_value=True) -def ref_data(dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation): - in_height = in_width = in_size - a_shape = 
(batch, in_height, in_width, in_channel) - w_shape = (kernel, kernel, in_channel, num_filter) + dtype = tvm.testing.parameter("float32") - a_np = np.random.uniform(size=a_shape).astype(dtype) - w_np = np.random.uniform(size=w_shape).astype(dtype) - dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1)) - b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding) - return a_np, w_np, b_np + @tvm.testing.fixture(cache_return_value=True) + def ref_data( + self, dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation + ): + """Generate reference data.""" + in_height = in_width = in_size + a_shape = (batch, in_height, in_width, in_channel) + w_shape = (kernel, kernel, in_channel, num_filter) + a_np = np.random.uniform(size=a_shape).astype(dtype) + w_np = np.random.uniform(size=w_shape).astype(dtype) + dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1)) + b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding) + return a_np, w_np, b_np -class BaseConv2DTests: @tvm.testing.requires_hexagon def test_conv2d_nhwc( self, @@ -59,16 +63,17 @@ def test_conv2d_nhwc( padding, dilation, ): + """Test Conv2D NHWC.""" a_np, w_np, b_np = ref_data - A = te.placeholder(a_np.shape, name="A", dtype=dtype) - W = te.placeholder(w_np.shape, name="W", dtype=dtype) + a_tensor = te.placeholder(a_np.shape, name="a_tensor", dtype=dtype) + w_tensor = te.placeholder(w_np.shape, name="w_tensor", dtype=dtype) with tvm.target.Target(get_hexagon_target("v68")): fcompute = topi.nn.conv2d_nhwc fschedule = topi.hexagon.schedule_conv2d_nhwc - B = fcompute(A, W, stride, padding, dilation, dtype) - s = fschedule([B]) + b_tensor = fcompute(a_tensor, w_tensor, stride, padding, dilation, dtype) + s = fschedule([b_tensor]) func_name = "conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format( dtype, @@ -81,15 +86,17 @@ def test_conv2d_nhwc( padding, dilation, ) - func = tvm.build(s, [A, W, B], get_hexagon_target("v68"), name=func_name) + func = tvm.build( + s, [a_tensor, w_tensor, b_tensor], get_hexagon_target("v68"), name=func_name + ) mod = hexagon_session.load_module(func) dev = hexagon_session.device - a = tvm.nd.array(a_np, dev) - w = tvm.nd.array(w_np, dev) - b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) + a_data = tvm.nd.array(a_np, dev) + weight = tvm.nd.array(w_np, dev) + b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=b_tensor.dtype), dev) - mod[func_name](a, w, b) + mod[func_name](a_data, weight, b) tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py index 40c8efa1cec2..d19223a42d74 100644 --- a/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py +++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py @@ -29,45 +29,12 @@ # TODO Should add kernal to tvm.testing.fixture -random_seed = tvm.testing.parameter(0) +class BaseConv2DTransposeTests: + """Conv2D transpose base class.""" -@tvm.testing.fixture -def shift_shape(batch): - return batch - - -@tvm.testing.fixture -def shift_shape(in_channel): - return in_channel - - -@tvm.testing.fixture -def shift_shape(in_size): - return in_size - - -@tvm.testing.fixture -def shift_shape(num_filter): - return num_filter - - -@tvm.testing.fixture -def shift_shape(stride): - return stride - - -@tvm.testing.fixture -def shift_shape(padding): - return padding - - -@tvm.testing.fixture -def 
shift_shape(output_padding): - return output_padding - + random_seed = tvm.testing.parameter(0) -class BaseConv2DTransposeTests: @tvm.testing.requires_hexagon def test_conv2d( self, @@ -81,17 +48,20 @@ def test_conv2d( output_padding, random_seed, ): + """Test conv2D.""" in_height, in_width = in_size kernel_height, kernel_width = (1, 1) stride_height, stride_width = stride pad_top, pad_left, pad_bottom, pad_right = padding - A = te.placeholder((batch, in_channel, in_height, in_width), name="A") - W = te.placeholder((in_channel, num_filter, kernel_height, kernel_width), name="W") + a_tensor = te.placeholder((batch, in_channel, in_height, in_width), name="a_tensor") + w_tensor = te.placeholder( + (in_channel, num_filter, kernel_height, kernel_width), name="w_tensor" + ) - a_shape = get_const_tuple(A.shape) - w_shape = get_const_tuple(W.shape) - dtype = A.dtype + a_shape = get_const_tuple(a_tensor.shape) + w_shape = get_const_tuple(w_tensor.shape) + dtype = a_tensor.dtype def get_ref_data(): @@ -107,42 +77,43 @@ def get_ref_data(): a_np, w_np, b_np, c_np = get_ref_data() fcompute_args = ( - A, - W, + a_tensor, + w_tensor, [stride_height, stride_width], [pad_top, pad_left, pad_bottom, pad_right], - A.dtype, + a_tensor.dtype, output_padding, ) with tvm.target.Target(get_hexagon_target("v68")): fcompute = topi.nn.conv2d_transpose_nchw fschedule = topi.hexagon.schedule_conv2d_transpose_nchw - B = fcompute(*fcompute_args) - C = topi.nn.relu(B) - s1 = fschedule([B]) - s2 = fschedule([C]) + b_tensor = fcompute(*fcompute_args) + c_tensor = topi.nn.relu(b_tensor) + schedule_1 = fschedule([b_tensor]) + schedule_2 = fschedule([c_tensor]) dev = hexagon_session.device - a = tvm.nd.array(a_np, dev) - w = tvm.nd.array(w_np, dev) - b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) - c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev) + a_data = tvm.nd.array(a_np, dev) + weight = tvm.nd.array(w_np, dev) + b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=b_tensor.dtype), dev) + c = tvm.nd.array(np.zeros(get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev) - func1 = tvm.build(s1, [A, W, B], get_hexagon_target("v68")) - func2 = tvm.build(s2, [A, W, C], get_hexagon_target("v68")) + func1 = tvm.build(schedule_1, [a_tensor, w_tensor, b_tensor], get_hexagon_target("v68")) + func2 = tvm.build(schedule_2, [a_tensor, w_tensor, c_tensor], get_hexagon_target("v68")) mod1 = hexagon_session.load_module(func1) mod2 = hexagon_session.load_module(func2) - mod1(a, w, b) - mod2(a, w, c) + mod1(a_data, weight, b) + mod2(a_data, weight, c) tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5) class TestConv2DTranspose(BaseConv2DTransposeTests): + """Test Conv2D transpose class.""" (batch, in_channel, in_size, num_filter, stride) = tvm.testing.parameters( (1, 3, (224, 224), 1, (1, 1)), diff --git a/tests/python/contrib/test_hexagon/topi/test_dense.py b/tests/python/contrib/test_hexagon/topi/test_dense.py index c76006ac08c2..fff4fd989f6d 100644 --- a/tests/python/contrib/test_hexagon/topi/test_dense.py +++ b/tests/python/contrib/test_hexagon/topi/test_dense.py @@ -28,89 +28,101 @@ from ..infrastructure import get_hexagon_target -random_seed = tvm.testing.parameter(0) - -use_bias = tvm.testing.parameter(True, False) - -# batch_size more than 8 would break -batch_size = tvm.testing.parameter(1, 2, 8) - -in_dim, out_dim = tvm.testing.parameters((1024, 1000)) - -in_dtype, out_dtype = tvm.testing.parameters( - 
("float32", "float32"), - ("float16", "float32"), - ("int8", "int32"), -) - - -@tvm.testing.fixture(cache_return_value=True) -def dense_ref_data(random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype): - np.random.seed(random_seed) - - if "float" in in_dtype: - a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype) - b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype) - c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype) - elif in_dtype == "int8": - a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype) - b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype) - c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype) - else: - raise ValueError("No method to generate test data for data type '{}'".format(in_dtype)) - - matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype)) - - if use_bias: - matmul += c_np - - d_np = np.maximum(matmul, 0) - return (a_np, b_np, c_np, d_np) - - -@tvm.testing.requires_hexagon -def test_dense( - hexagon_session: Session, - batch_size, - in_dim, - out_dim, - use_bias, - in_dtype, - out_dtype, - dense_ref_data, -): - if in_dtype == "float16": - pytest.xfail("float16 is not supported.") - - if "int" in in_dtype: - tol = {"atol": 0, "rtol": 0} - elif in_dtype == "float32": - tol = {"rtol": 1e-5, "atol": 1e-5} - - A = te.placeholder((batch_size, in_dim), name="A", dtype=in_dtype) - B = te.placeholder((out_dim, in_dim), name="B", dtype=in_dtype) - C = te.placeholder((out_dim,), name="C", dtype=out_dtype) - - a_np, b_np, c_np, d_np = dense_ref_data - - fcompute = topi.nn.dense - fschedule = topi.hexagon.schedule_dense - - with tvm.target.Target(get_hexagon_target("v68")): - D = fcompute(A, B, C if use_bias else None, out_dtype) - D = topi.nn.relu(D) - s = fschedule([D]) - - func = tvm.build(s, [A, B, C, D], get_hexagon_target("v68"), name="dense") - mod = hexagon_session.load_module(func) - - dev = hexagon_session.device - a = tvm.nd.array(a_np, dev) - b = tvm.nd.array(b_np, dev) - c = tvm.nd.array(c_np, dev) - d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev) - mod["dense"](a, b, c, d) - tvm.testing.assert_allclose(d.numpy(), d_np, **tol) + +class TestDense: + """Dense test class.""" + + random_seed = tvm.testing.parameter(0) + + use_bias = tvm.testing.parameter(True, False) + + # batch_size more than 8 would break + batch_size = tvm.testing.parameter(1, 2, 8) + + in_dim, out_dim = tvm.testing.parameters((1024, 1000)) + + in_dtype, out_dtype = tvm.testing.parameters( + ("float32", "float32"), + ("float16", "float32"), + ("int8", "int32"), + ) + + @tvm.testing.fixture(cache_return_value=True) + def dense_ref_data( + self, random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype + ): + """Generate reference data.""" + np.random.seed(random_seed) + + if "float" in in_dtype: + a_np = np.random.uniform(size=(batch_size, in_dim)).astype(in_dtype) + b_np = np.random.uniform(size=(out_dim, in_dim)).astype(in_dtype) + c_np = np.random.uniform(size=(out_dim,)).astype(out_dtype) + elif in_dtype == "int8": + a_np = np.random.randint(low=-128, high=127, size=(batch_size, in_dim)).astype(in_dtype) + b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(in_dtype) + c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(out_dtype) + else: + raise ValueError("No method to generate test data for data type '{}'".format(in_dtype)) + + matmul = 
np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype)) + + if use_bias: + matmul += c_np + + d_np = np.maximum(matmul, 0) + return (a_np, b_np, c_np, d_np) + + @tvm.testing.requires_hexagon + def test_dense( + self, + hexagon_session: Session, + batch_size, + in_dim, + out_dim, + use_bias, + in_dtype, + out_dtype, + dense_ref_data, + ): + """Test dense.""" + if in_dtype == "float16": + pytest.xfail("float16 is not supported.") + + if "int" in in_dtype: + tol = {"atol": 0, "rtol": 0} + elif in_dtype == "float32": + tol = {"rtol": 1e-5, "atol": 1e-5} + + a_tensor = te.placeholder((batch_size, in_dim), name="a_tensor", dtype=in_dtype) + b_tensor = te.placeholder((out_dim, in_dim), name="b_tensor", dtype=in_dtype) + c_tensor = te.placeholder((out_dim,), name="c_tensor", dtype=out_dtype) + + a_np, b_np, c_np, d_np = dense_ref_data + + fcompute = topi.nn.dense + fschedule = topi.hexagon.schedule_dense + + with tvm.target.Target(get_hexagon_target("v68")): + d_tensor = fcompute(a_tensor, b_tensor, c_tensor if use_bias else None, out_dtype) + d_tensor = topi.nn.relu(d_tensor) + schedule = fschedule([d_tensor]) + + func = tvm.build( + schedule, + [a_tensor, b_tensor, c_tensor, d_tensor], + get_hexagon_target("v68"), + name="dense", + ) + mod = hexagon_session.load_module(func) + + dev = hexagon_session.device + a_data = tvm.nd.array(a_np, dev) + b_data = tvm.nd.array(b_np, dev) + c_data = tvm.nd.array(c_np, dev) + d_data = tvm.nd.array(np.zeros(get_const_tuple(d_tensor.shape), dtype=out_dtype), dev) + mod["dense"](a_data, b_data, c_data, d_data) + tvm.testing.assert_allclose(d_data.numpy(), d_np, **tol) if __name__ == "__main__": diff --git a/tests/python/contrib/test_hexagon/topi/test_depth_to_space.py b/tests/python/contrib/test_hexagon/topi/test_depth_to_space.py index 3de9ec13497a..0cb41b595255 100644 --- a/tests/python/contrib/test_hexagon/topi/test_depth_to_space.py +++ b/tests/python/contrib/test_hexagon/topi/test_depth_to_space.py @@ -19,7 +19,6 @@ """Test depth_to_space slice op for hexagon""" import numpy as np -import pytest import tvm from tvm import te @@ -30,28 +29,27 @@ from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target -d2s_fp16_tests = ( - ((1, 8, 8, 256), 2, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 8, 8, 1024), 4, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 16, 16, 256), 2, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 16, 16, 1024), 4, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 8, 8, 256), 2, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 8, 8, 1024), 4, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 16, 16, 256), 2, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), - ((1, 16, 16, 1024), 4, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), -) - -d2s_uint8_tests = ( - ((1, 8, 8, 256), 2, "CDR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), - ((1, 8, 8, 1024), 4, "CDR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), - ((1, 8, 8, 256), 2, "DCR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), - ((1, 8, 8, 1024), 4, "DCR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), -) - - class TestD2SSlice: """Test class that defines the Depth to Space slice test""" + d2s_fp16_tests = ( + ((1, 8, 8, 256), 2, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 8, 8, 1024), 4, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 16, 16, 256), 2, "CDR", "float16", 
"nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 16, 16, 1024), 4, "CDR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 8, 8, 256), 2, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 8, 8, 1024), 4, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 16, 16, 256), 2, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ((1, 16, 16, 1024), 4, "DCR", "float16", "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d"), + ) + + d2s_uint8_tests = ( + ((1, 8, 8, 256), 2, "CDR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), + ((1, 8, 8, 1024), 4, "CDR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), + ((1, 8, 8, 256), 2, "DCR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), + ((1, 8, 8, 1024), 4, "DCR", "uint8", "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d"), + ) + (input_shape, block_size, mode, dtype, input_layout, output_layout,) = tvm.testing.parameters( *d2s_fp16_tests, *d2s_uint8_tests, @@ -93,11 +91,11 @@ def test_d2s_slice( transformed_ref_output_np, ): """Top level testing function for depth to space""" - Input = te.placeholder(input_shape, name="Input", dtype=dtype) + input_tensor = te.placeholder(input_shape, name="input_tensor", dtype=dtype) - Output = d2s_compute(Input, block_size, "NHWC", mode) + output = d2s_compute(input_tensor, block_size, "NHWC", mode) - tir_s = d2s_schedule(Input, Output, input_layout, output_layout) + tir_s = d2s_schedule(input_tensor, output, input_layout, output_layout) input_data = allocate_hexagon_array( hexagon_session.device, @@ -114,7 +112,10 @@ def test_d2s_slice( ) with tvm.transform.PassContext(opt_level=3): runtime_module = tvm.build( - tir_s.mod, [Input, Output], target=get_hexagon_target("v69"), name="depth_to_space" + tir_s.mod, + [input_tensor, output], + target=get_hexagon_target("v69"), + name="depth_to_space", ) mod = hexagon_session.load_module(runtime_module) diff --git a/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py b/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py index 063541cc21a0..f95d41093043 100644 --- a/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py +++ b/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py @@ -14,8 +14,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
- -import sys +"""Depthwise Conv2D Tests.""" import numpy as np @@ -30,121 +29,6 @@ from ..infrastructure import get_hexagon_target -random_seed = tvm.testing.parameter(0) - -in_dtype, out_dtype = tvm.testing.parameters( - ("float32", "float32"), -) - - -@tvm.testing.fixture -def input_shape(layout, batch, in_channel, in_size, filter_shape): - if layout == "NCHW": - return (batch, in_channel, in_size, in_size) - elif layout == "NHWC": - return (batch, in_size, in_size, in_channel) - elif layout == "NCHWc": - oc_block = filter_shape[-1] - ic_block = next(bn for bn in range(oc_block, 0, -1) if in_channel % bn == 0) - return (batch, in_channel // ic_block, in_size, in_size, ic_block) - - -@tvm.testing.fixture -def filter_shape(layout, in_channel, channel_multiplier, kernel): - filter_channel = in_channel - if layout == "NCHW": - return (filter_channel, channel_multiplier, kernel, kernel) - elif layout == "NHWC": - return (kernel, kernel, filter_channel, channel_multiplier) - elif layout == "NCHWc": - out_channel = in_channel * channel_multiplier - # For testing the functionality, we choose an arbitrary block - # size that can divide out_channel, regardless of the - # performance. - oc_block = next(bn for bn in range(16, 0, -1) if out_channel % bn == 0) - return (out_channel // oc_block, 1, kernel, kernel, 1, oc_block) - - -@tvm.testing.fixture -def scale_shape(layout, in_channel, channel_multiplier, filter_shape): - out_channel = in_channel * channel_multiplier - - if layout in ("NCHW", "NHWC"): - return (out_channel,) - - if layout == "NCHWc": - oc_block = filter_shape[-1] - return (out_channel // oc_block, oc_block) - - raise ValueError("Unknown layout {}".format(layout)) - - -@tvm.testing.fixture -def shift_shape(scale_shape): - return scale_shape - - -@tvm.testing.fixture(cache_return_value=True) -def ref_data( - random_seed, - in_dtype, - out_dtype, - layout, - input_shape, - filter_shape, - dilation, - stride, - padding, - scale_shape, - shift_shape, - use_scale_shift, - apply_relu, -): - np.random.seed(random_seed) - - print(input_shape) - - # scipy.signal.convolve2d does not support float16 data types, and - # the python fallback is too slow for general use. Computing - # ref_data in float32 will have fewer rounding errors than the TVM - # float16 compute, but those vary based on schedule anyways. 
- conv_dtype = "float32" if in_dtype == "float16" else in_dtype - - input_np = np.random.uniform(size=input_shape).astype(in_dtype) - filter_np = np.random.uniform(size=filter_shape).astype(in_dtype) - scale_np = np.random.uniform(size=scale_shape).astype(out_dtype) - shift_np = np.random.uniform(size=shift_shape).astype(out_dtype) - if layout == "NCHW": - np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchw - dilation = (1, 1, dilation, dilation) - reshape = (1, -1, 1, 1) - elif layout == "NHWC": - np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nhwc - dilation = (dilation, dilation, 1, 1) - reshape = (1, 1, 1, -1) - elif layout == "NCHWc": - np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchwc - dilation = (1, 1, dilation, dilation, 1, 1) - reshape = (1, scale_shape[0], 1, 1, scale_shape[1]) - - dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, dilation) - output_np = np_depthwise_conv2d( - input_np.astype(conv_dtype), dilated_filter_np.astype(conv_dtype), stride, padding - ).astype(out_dtype) - - if use_scale_shift: - output_np = output_np * scale_np.reshape(reshape) + shift_np.reshape(reshape) - if apply_relu: - output_np = np.maximum(output_np, 0) - - return ( - input_np, - filter_np, - scale_np, - shift_np, - output_np, - ) - class BaseDepthwiseConv2D: """Provides the test_conv2d test function, to be used by other test classes. @@ -154,6 +38,124 @@ class BaseDepthwiseConv2D: (e.g. implemented only for llvm). """ + random_seed = tvm.testing.parameter(0) + + in_dtype, out_dtype = tvm.testing.parameters( + ("float32", "float32"), + ) + + @tvm.testing.fixture + def input_shape(self, layout, batch, in_channel, in_size, filter_shape): + """Returns input shape.""" + if layout == "NCHW": + return (batch, in_channel, in_size, in_size) + elif layout == "NHWC": + return (batch, in_size, in_size, in_channel) + elif layout == "NCHWc": + oc_block = filter_shape[-1] + ic_block = next(bn for bn in range(oc_block, 0, -1) if in_channel % bn == 0) + return (batch, in_channel // ic_block, in_size, in_size, ic_block) + else: + raise RuntimeError(f"Not supported layout {layout}") + + @tvm.testing.fixture + def filter_shape(self, layout, in_channel, channel_multiplier, kernel): + """Returns filter shape.""" + filter_channel = in_channel + if layout == "NCHW": + return (filter_channel, channel_multiplier, kernel, kernel) + elif layout == "NHWC": + return (kernel, kernel, filter_channel, channel_multiplier) + elif layout == "NCHWc": + out_channel = in_channel * channel_multiplier + # For testing the functionality, we choose an arbitrary block + # size that can divide out_channel, regardless of the + # performance. 
+ oc_block = next(bn for bn in range(16, 0, -1) if out_channel % bn == 0) + return (out_channel // oc_block, 1, kernel, kernel, 1, oc_block) + else: + raise RuntimeError(f"Not supported layout {layout}") + + @tvm.testing.fixture + def scale_shape(self, layout, in_channel, channel_multiplier, filter_shape): + """Returns scale shape.""" + out_channel = in_channel * channel_multiplier + + if layout in ("NCHW", "NHWC"): + return (out_channel,) + + if layout == "NCHWc": + oc_block = filter_shape[-1] + return (out_channel // oc_block, oc_block) + + raise ValueError("Unknown layout {}".format(layout)) + + @tvm.testing.fixture + def shift_shape(self, scale_shape): + """Returns shift shape.""" + return scale_shape + + @tvm.testing.fixture(cache_return_value=True) + def ref_data( + self, + random_seed, + in_dtype, + out_dtype, + layout, + input_shape, + filter_shape, + dilation, + stride, + padding, + scale_shape, + shift_shape, + use_scale_shift, + apply_relu, + ): + """Generate reference data.""" + np.random.seed(random_seed) + + # scipy.signal.convolve2d does not support float16 data types, and + # the python fallback is too slow for general use. Computing + # ref_data in float32 will have fewer rounding errors than the TVM + # float16 compute, but those vary based on schedule anyways. + conv_dtype = "float32" if in_dtype == "float16" else in_dtype + + input_np = np.random.uniform(size=input_shape).astype(in_dtype) + filter_np = np.random.uniform(size=filter_shape).astype(in_dtype) + scale_np = np.random.uniform(size=scale_shape).astype(out_dtype) + shift_np = np.random.uniform(size=shift_shape).astype(out_dtype) + if layout == "NCHW": + np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchw + dilation = (1, 1, dilation, dilation) + reshape = (1, -1, 1, 1) + elif layout == "NHWC": + np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nhwc + dilation = (dilation, dilation, 1, 1) + reshape = (1, 1, 1, -1) + elif layout == "NCHWc": + np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchwc + dilation = (1, 1, dilation, dilation, 1, 1) + reshape = (1, scale_shape[0], 1, 1, scale_shape[1]) + + dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, dilation) + output_np = np_depthwise_conv2d( + input_np.astype(conv_dtype), dilated_filter_np.astype(conv_dtype), stride, padding + ).astype(out_dtype) + + if use_scale_shift: + output_np = output_np * scale_np.reshape(reshape) + shift_np.reshape(reshape) + if apply_relu: + output_np = np.maximum(output_np, 0) + + return ( + input_np, + filter_np, + scale_np, + shift_np, + output_np, + ) + @tvm.testing.requires_hexagon def test_conv2d( self, @@ -167,15 +169,13 @@ def test_conv2d( shift_shape, use_scale_shift, apply_relu, - batch, - in_channel, - channel_multiplier, kernel, stride, padding, dilation, ref_data, ): + """Test conv2D.""" # Transform the padding argument from 'str' to 'tuple' to # match the "workload" tuple in TopHub. Which padding_args to # use for each layout chosen to reproduce previous behavior. 
@@ -187,26 +187,26 @@ def test_conv2d( padding_args = padding # placeholder - Input = te.placeholder(input_shape, name="Input", dtype=in_dtype) - Filter = te.placeholder(filter_shape, name="Filter", dtype=in_dtype) - Scale = te.placeholder(scale_shape, name="Scale", dtype=out_dtype) - Shift = te.placeholder(shift_shape, name="Shift", dtype=out_dtype) + input_tensor = te.placeholder(input_shape, name="input_tensor", dtype=in_dtype) + filter_tensor = te.placeholder(filter_shape, name="filter_tensor", dtype=in_dtype) + scale = te.placeholder(scale_shape, name="scale", dtype=out_dtype) + shift = te.placeholder(shift_shape, name="shift", dtype=out_dtype) if layout == "NCHW": topi_scale_shift = topi.nn.scale_shift_nchw - fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype) + fcompute_args = (input_tensor, filter_tensor, stride, padding_args, dilation, out_dtype) elif layout == "NHWC": topi_scale_shift = topi.nn.scale_shift_nhwc - fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype) + fcompute_args = (input_tensor, filter_tensor, stride, padding_args, dilation, out_dtype) elif layout == "NCHWc": topi_scale_shift = topi.nn.scale_shift_nchwc in_layout = "NCHW{}c".format(input_shape[-1]) out_layout = "NCHW{}c".format(filter_shape[-1]) fcompute_args = ( - Input, - Filter, + input_tensor, + filter_tensor, stride, padding, dilation, @@ -223,18 +223,18 @@ def test_conv2d( elif layout == "NHWC": fcompute = topi.nn.depthwise_conv2d_nhwc fschedule = topi.hexagon.schedule_depthwise_conv2d_nhwc - C = fcompute(*fcompute_args) + c_tensor = fcompute(*fcompute_args) if use_scale_shift: - C = topi_scale_shift(C, Scale, Shift) + c_tensor = topi_scale_shift(c_tensor, scale, shift) if apply_relu: - C = topi.nn.relu(C) + c_tensor = topi.nn.relu(c_tensor) - s = fschedule([C]) + schedule = fschedule([c_tensor]) # Build and run f = tvm.build( - s, - [Input, Filter, Scale, Shift, C], + schedule, + [input_tensor, filter_tensor, scale, shift, c_tensor], get_hexagon_target("v68"), ) mod = hexagon_session.load_module(f) @@ -247,7 +247,7 @@ def test_conv2d( scale_tvm = tvm.nd.array(scale_np, dev) shift_tvm = tvm.nd.array(shift_np, dev) output_tvm = tvm.nd.array( - np.zeros(shape=get_const_tuple(C.shape), dtype=C.dtype), + np.zeros(shape=get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev, ) @@ -257,7 +257,7 @@ def test_conv2d( tvm.testing.assert_allclose(output_np, output_tvm.numpy(), **tol) -class TestDepthwiseConv2D_MobilenetWorkloads(BaseDepthwiseConv2D): +class TestDepthwiseConv2DMobilenetWorkloads(BaseDepthwiseConv2D): """Extra tests to verify functionality for workloads used by mobilenet.""" layout = tvm.testing.parameter("NCHW", "NHWC") @@ -280,6 +280,7 @@ class TestDepthwiseConv2D_MobilenetWorkloads(BaseDepthwiseConv2D): class TestDepthwiseConv2D(BaseDepthwiseConv2D): + """Test depthwise conv2D class.""" layout = tvm.testing.parameter("NCHW", "NHWC") use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"]) diff --git a/tests/python/contrib/test_hexagon/topi/test_pad.py b/tests/python/contrib/test_hexagon/topi/test_pad.py index 06b939bf6409..18a392e5b1ac 100644 --- a/tests/python/contrib/test_hexagon/topi/test_pad.py +++ b/tests/python/contrib/test_hexagon/topi/test_pad.py @@ -27,25 +27,26 @@ @tvm.testing.requires_hexagon def test_nn_pad(hexagon_session: Session): + """Test nn pad.""" dtype = "uint8" in_shape = (1, 56, 56, 32) data_in = np.ones(in_shape).astype(dtype) - A = te.placeholder(shape=in_shape, name="A", dtype=dtype) + 
a_tensor = te.placeholder(shape=in_shape, name="a_tensor", dtype=dtype) - C = topi.nn.pad(A, [0, 1, 1, 0], [0, 1, 1, 0], pad_value=0) + c_tensor = topi.nn.pad(a_tensor, [0, 1, 1, 0], [0, 1, 1, 0], pad_value=0) with tvm.target.Target(get_hexagon_target("v68")): fschedule = topi.hexagon.schedule_pad - s = fschedule(C) + s = fschedule(c_tensor) - func = tvm.build(s, [A, C], get_hexagon_target("v68"), name="pad") + func = tvm.build(s, [a_tensor, c_tensor], get_hexagon_target("v68"), name="pad") mod = hexagon_session.load_module(func) dev = hexagon_session.device a = tvm.nd.array(data_in, dev) - b = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev) + b = tvm.nd.array(np.zeros(get_const_tuple(c_tensor.shape), dtype=c_tensor.dtype), dev) mod["pad"](a, b) # Reference numpy pad output diff --git a/tests/python/contrib/test_hexagon/topi/test_pooling.py b/tests/python/contrib/test_hexagon/topi/test_pooling.py index ecc998875296..5ae857c2dca5 100644 --- a/tests/python/contrib/test_hexagon/topi/test_pooling.py +++ b/tests/python/contrib/test_hexagon/topi/test_pooling.py @@ -29,6 +29,8 @@ class TestAdaptivePool: + """Adaptive pool test class.""" + dshape, out_size, pool_type, layout = tvm.testing.parameters( ((1, 3, 112, 112), (1, 1), "max", "NCHW"), ((1, 3, 112, 112), (1, 1), "avg", "NCHW"), @@ -58,6 +60,7 @@ class TestAdaptivePool: @tvm.testing.requires_hexagon def test_adaptive_pool(self, hexagon_session: Session, dshape, out_size, pool_type, layout): + """Test adaptive pool.""" dtype = "float32" np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype) np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout) @@ -103,11 +106,12 @@ def verify_poolnd( count_include_pad=True, layout="NCW", ): - A = te.placeholder(input_shape, name="A") + """Pool test verification.""" + a_tensor = te.placeholder(input_shape, name="a_tensor") if n == 1: - B = topi.nn.pool1d( - A, + b_tensor = topi.nn.pool1d( + a_tensor, kernel=kernel, stride=stride, dilation=dilation, @@ -118,8 +122,8 @@ def verify_poolnd( count_include_pad=count_include_pad, ) elif n == 2: - B = topi.nn.pool2d( - A, + b_tensor = topi.nn.pool2d( + a_tensor, kernel=kernel, stride=stride, dilation=dilation, @@ -130,8 +134,8 @@ def verify_poolnd( count_include_pad=count_include_pad, ) elif n == 3: - B = topi.nn.pool3d( - A, + b_tensor = topi.nn.pool3d( + a_tensor, kernel=kernel, stride=stride, dilation=dilation, @@ -144,9 +148,9 @@ def verify_poolnd( else: raise ValueError(f"PoolND only supports n=1, 2, 3 got n={n}") - B = topi.nn.relu(B) - dtype = A.dtype - output_shape = [int(i) for i in B.shape] + b_tensor = topi.nn.relu(b_tensor) + dtype = a_tensor.dtype + output_shape = [int(i) for i in b_tensor.shape] input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype) @@ -169,20 +173,22 @@ def verify_poolnd( with tvm.target.Target(get_hexagon_target("v68")): fschedule = topi.hexagon.schedule_pool - s = fschedule(B, layout) + s = fschedule(b_tensor, layout) - func = tvm.build(s, [A, B], get_hexagon_target("v68"), name="pool") + func = tvm.build(s, [a_tensor, b_tensor], get_hexagon_target("v68"), name="pool") mod = hexagon_session.load_module(func) dev = hexagon_session.device a = tvm.nd.array(input_np, dev) - b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev) + b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=dtype), dev) mod["pool"](a, b) tvm.testing.assert_allclose(b.numpy(), ref_np, rtol=1e-5) class TestPool1D: + """Pool1D test class.""" + ( input_shape, 
kernel, @@ -244,6 +250,7 @@ def test_pool1d( count_include_pad, layout, ): + """Test Pool1D.""" verify_poolnd( hexagon_session, 1, @@ -260,6 +267,8 @@ def test_pool1d( class TestPool2D: + """Pool2D test class.""" + ( input_shape, kernel, @@ -321,6 +330,7 @@ def test_pool2d( count_include_pad, layout, ): + """Test Pool2D.""" verify_poolnd( hexagon_session, 2, @@ -337,6 +347,8 @@ def test_pool2d( class TestPool3D: + """Pool3D test class.""" + ( input_shape, kernel, @@ -719,6 +731,7 @@ def test_pool3d( count_include_pad, layout, ): + """Test Pool3D.""" verify_poolnd( hexagon_session, 3, diff --git a/tests/python/contrib/test_hexagon/topi/test_quantize.py b/tests/python/contrib/test_hexagon/topi/test_quantize.py old mode 100755 new mode 100644 index 0b6e1dfa0e73..a188f7cb2fe1 --- a/tests/python/contrib/test_hexagon/topi/test_quantize.py +++ b/tests/python/contrib/test_hexagon/topi/test_quantize.py @@ -14,6 +14,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +"""TIR quantize schedule tests.""" import numpy as np import tvm @@ -26,30 +27,31 @@ get_hexagon_target, ) +QUANTIZE_SCALE = None +QUANTIZE_ZERO_POINT = None -@tvm.testing.fixture -def expected_output_np(input_np, output_dtype): - global scale, zero_point - quant_np, scale, zero_point = quantize_np(input_np, output_dtype) - return quant_np - - -@tvm.testing.fixture -def input_np(input_shape, input_dtype): - return np.random.random(input_shape).astype(input_dtype) +class TestQuantize: + """Test quantize class.""" -@tvm.testing.fixture -def transformed_input_np(input_np, input_crouton_layout): - return transform_numpy(input_np, "nhwc", input_crouton_layout) + @tvm.testing.fixture + def expected_output_np(self, input_np, output_dtype): + global QUANTIZE_SCALE, QUANTIZE_ZERO_POINT + quant_np, QUANTIZE_SCALE, QUANTIZE_ZERO_POINT = quantize_np(input_np, output_dtype) + return quant_np + @tvm.testing.fixture + def input_np(self, input_shape, input_dtype): + return np.random.random(input_shape).astype(input_dtype) -@tvm.testing.fixture -def transformed_expected_output_np(expected_output_np, output_layout): - return transform_numpy(expected_output_np, "nhwc", output_layout) + @tvm.testing.fixture + def transformed_input_np(self, input_np, input_crouton_layout): + return transform_numpy(input_np, "nhwc", input_crouton_layout) + @tvm.testing.fixture + def transformed_expected_output_np(self, expected_output_np, output_layout): + return transform_numpy(expected_output_np, "nhwc", output_layout) -class TestQuantize: input_crouton_layout, output_layout, input_dtype = tvm.testing.parameters( ("nhwc-4h2w32c2w-2d", "nhwc-8h8w32c-2d", "float32"), ) @@ -65,7 +67,6 @@ def test_quantize( self, input_dtype, output_dtype, - input_np, transformed_input_np, input_shape, expected_output_np, @@ -74,11 +75,14 @@ def test_quantize( output_layout, hexagon_session, ): - A = te.placeholder(input_shape, name="A", dtype=input_dtype) + """Test quantize.""" + a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=input_dtype) - M = s1.quantize_compute(A, scale, zero_point, output_dtype) + m_tensor = s1.quantize_compute(a_tensor, QUANTIZE_SCALE, QUANTIZE_ZERO_POINT, output_dtype) - tir_schedule = s1.tir_quantize_schedule(M, A, input_crouton_layout, output_layout) + tir_schedule = s1.tir_quantize_schedule( + m_tensor, a_tensor, input_crouton_layout, output_layout + ) sch = tir_schedule.mod @@ -88,12 +92,12 @@ def test_quantize( with tvm.transform.PassContext(opt_level=3): func 
= tvm.build( sch, - [A, M], + [a_tensor, m_tensor], get_hexagon_target("v69"), name="quantize", ) - A_data_nd = allocate_hexagon_array( + a_data_nd = allocate_hexagon_array( hexagon_session.device, data=transformed_input_np, dtype=input_dtype, @@ -101,7 +105,7 @@ def test_quantize( mem_scope="global.vtcm", ) - M_data_nd = allocate_hexagon_array( + m_data_nd = allocate_hexagon_array( hexagon_session.device, tensor_shape=transformed_expected_output_np.shape, dtype=output_dtype, @@ -110,14 +114,14 @@ def test_quantize( ) mod = hexagon_session.load_module(func) - mod(A_data_nd, M_data_nd) + mod(a_data_nd, m_data_nd) - b, h, w, c = expected_output_np.shape + b, h, weight, c = expected_output_np.shape # convert nd to np and reshape to fixed chunk size layout - M_data_np = M_data_nd.numpy().reshape([b, h // 8, w // 8, c // 32, 8, 8, 32]) + m_data_np = m_data_nd.numpy().reshape([b, h // 8, weight // 8, c // 32, 8, 8, 32]) - np.testing.assert_allclose(transformed_expected_output_np, M_data_np, atol=1) + np.testing.assert_allclose(transformed_expected_output_np, m_data_np, atol=1) if __name__ == "__main__": diff --git a/tests/python/contrib/test_hexagon/topi/test_reduce.py b/tests/python/contrib/test_hexagon/topi/test_reduce.py index 8fc0b6d901ab..eb798db1dd2b 100644 --- a/tests/python/contrib/test_hexagon/topi/test_reduce.py +++ b/tests/python/contrib/test_hexagon/topi/test_reduce.py @@ -24,24 +24,6 @@ from ..infrastructure import get_hexagon_target -in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters( - ((32,), 0, False, "argmax", "float32"), - ((32, 24, 32, 24), (1, 2, 3), True, "sum", "float32"), - ((2, 3), None, True, "all", "bool"), - ((32, 24 * 32 * 24), (1,), False, "max", "float32"), - ((32, 128, 24), None, True, "sum", "float32"), - ((32, 128, 24), None, True, "all", "bool"), - ((32, 24, 32, 24), (0, 2), False, "min", "float32"), - ((32, 128), 1, True, "argmax", "float32"), - ((32, 24, 32, 24), 2, False, "argmin", "float32"), - ((31, 21, 15), None, True, "argmax", "float32"), - ((31, 21, 15), None, False, "sum", "float32"), - ((2, 3), None, True, "any", "bool"), - ((32, 128, 24), None, True, "any", "bool"), - ((1, 4, 7), 1, True, "any", "bool"), - ((32, 24, 32, 24), 2, False, "any", "bool"), -) - def _my_npy_argmax(arr, axis, keepdims): if not keepdims: @@ -68,93 +50,114 @@ def _my_npy_argmin(arr, axis, keepdims): return arr.argmin(axis=axis).reshape(out_shape) -@tvm.testing.fixture(cache_return_value=True) -def ref_data(in_shape, axis, keepdims, reduce_type, dtype): - # Test - if dtype == "bool": - in_npy_map = in_npy = np.random.choice([True, False], size=in_shape) - else: - in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype) - in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype) - - if reduce_type == "sum": - out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims) - elif reduce_type == "all" and dtype == "bool": - out_npy = in_npy_map.all(axis=axis, keepdims=keepdims) - elif reduce_type == "any" and dtype == "bool": - out_npy = in_npy_map.any(axis=axis, keepdims=keepdims) - elif reduce_type == "max": - out_npy = in_npy_map.max(axis=axis, keepdims=keepdims) - elif reduce_type == "min": - out_npy = in_npy_map.min(axis=axis, keepdims=keepdims) - elif reduce_type == "argmax": - out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims) - elif reduce_type == "argmin": - out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims) - else: - raise NotImplementedError - - return in_npy, in_npy_map, out_npy - - -@tvm.testing.requires_hexagon -def 
test_reduce_map( - hexagon_session: Session, ref_data, in_shape, axis, keepdims, reduce_type, dtype -): - in_npy, in_npy_map, out_npy = ref_data - - # Build the logic and compile the function - A = te.placeholder(shape=in_shape, name="A", dtype=dtype) - A1 = topi.sqrt(topi.exp(A)) - out_dtype = dtype - if reduce_type == "sum": - B = topi.sum(A1, axis=axis, keepdims=keepdims) - elif reduce_type == "all": - B = topi.all(A, axis=axis, keepdims=keepdims) - elif reduce_type == "any": - B = topi.any(A, axis=axis, keepdims=keepdims) - elif reduce_type == "max": - B = topi.max(A1, axis=axis, keepdims=keepdims) - elif reduce_type == "min": - B = topi.min(A1, axis=axis, keepdims=keepdims) - elif reduce_type == "argmax": - B = topi.argmax(A1, axis=axis, keepdims=keepdims) - out_dtype = "int32" - elif reduce_type == "argmin": - B = topi.argmin(A1, axis=axis, keepdims=keepdims) - out_dtype = "int32" - else: - raise NotImplementedError - - with tvm.target.Target(get_hexagon_target("v68")): - fschedule = topi.hexagon.schedule_reduce - s = fschedule(B) - - func = tvm.build(s, [A, B], get_hexagon_target("v68"), name=reduce_type) - mod = hexagon_session.load_module(func) - - dev = hexagon_session.device - data_tvm = tvm.nd.array(in_npy, device=dev) - out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=out_dtype) - - mod[reduce_type](data_tvm, out_tvm) - - if reduce_type == "argmax" or reduce_type == "argmin": - out_tvm_indices = out_tvm.numpy() - if keepdims: - out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis) - if axis is None: - out_tvm_val = in_npy_map.ravel()[out_tvm_indices] +class TestReduce: + """Test reduce class.""" + + in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters( + ((32,), 0, False, "argmax", "float32"), + ((32, 24, 32, 24), (1, 2, 3), True, "sum", "float32"), + ((2, 3), None, True, "all", "bool"), + ((32, 24 * 32 * 24), (1,), False, "max", "float32"), + ((32, 128, 24), None, True, "sum", "float32"), + ((32, 128, 24), None, True, "all", "bool"), + ((32, 24, 32, 24), (0, 2), False, "min", "float32"), + ((32, 128), 1, True, "argmax", "float32"), + ((32, 24, 32, 24), 2, False, "argmin", "float32"), + ((31, 21, 15), None, True, "argmax", "float32"), + ((31, 21, 15), None, False, "sum", "float32"), + ((2, 3), None, True, "any", "bool"), + ((32, 128, 24), None, True, "any", "bool"), + ((1, 4, 7), 1, True, "any", "bool"), + ((32, 24, 32, 24), 2, False, "any", "bool"), + ) + + @tvm.testing.fixture(cache_return_value=True) + def ref_data(self, in_shape, axis, keepdims, reduce_type, dtype): + """Generate test reference data.""" + if dtype == "bool": + in_npy_map = in_npy = np.random.choice([True, False], size=in_shape) else: - other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis + 1) :])) - sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:] - out_tvm_val = in_npy_map[sel_indices] - if reduce_type == "argmax": - tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1e-3, 1e-3) + in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype) + in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype) + + if reduce_type == "sum": + out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims) + elif reduce_type == "all" and dtype == "bool": + out_npy = in_npy_map.all(axis=axis, keepdims=keepdims) + elif reduce_type == "any" and dtype == "bool": + out_npy = in_npy_map.any(axis=axis, keepdims=keepdims) + elif reduce_type == "max": + out_npy = in_npy_map.max(axis=axis, keepdims=keepdims) + elif reduce_type == "min": + 
out_npy = in_npy_map.min(axis=axis, keepdims=keepdims) + elif reduce_type == "argmax": + out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims) elif reduce_type == "argmin": - tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3) - else: - tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3) + out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims) + else: + raise NotImplementedError + + return in_npy, in_npy_map, out_npy + + @tvm.testing.requires_hexagon + def test_reduce_map( + self, hexagon_session: Session, ref_data, in_shape, axis, keepdims, reduce_type, dtype + ): + """Test reduce map.""" + in_npy, in_npy_map, out_npy = ref_data + + # Build the logic and compile the function + a_tensor = te.placeholder(shape=in_shape, name="a_tensor", dtype=dtype) + a1_tensor = topi.sqrt(topi.exp(a_tensor)) + out_dtype = dtype + if reduce_type == "sum": + b_tensor = topi.sum(a1_tensor, axis=axis, keepdims=keepdims) + elif reduce_type == "all": + b_tensor = topi.all(a_tensor, axis=axis, keepdims=keepdims) + elif reduce_type == "any": + b_tensor = topi.any(a_tensor, axis=axis, keepdims=keepdims) + elif reduce_type == "max": + b_tensor = topi.max(a1_tensor, axis=axis, keepdims=keepdims) + elif reduce_type == "min": + b_tensor = topi.min(a1_tensor, axis=axis, keepdims=keepdims) + elif reduce_type == "argmax": + b_tensor = topi.argmax(a1_tensor, axis=axis, keepdims=keepdims) + out_dtype = "int32" + elif reduce_type == "argmin": + b_tensor = topi.argmin(a1_tensor, axis=axis, keepdims=keepdims) + out_dtype = "int32" + else: + raise NotImplementedError + + with tvm.target.Target(get_hexagon_target("v68")): + fschedule = topi.hexagon.schedule_reduce + s = fschedule(b_tensor) + + func = tvm.build(s, [a_tensor, b_tensor], get_hexagon_target("v68"), name=reduce_type) + mod = hexagon_session.load_module(func) + + dev = hexagon_session.device + data_tvm = tvm.nd.array(in_npy, device=dev) + out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=out_dtype) + + mod[reduce_type](data_tvm, out_tvm) + + if reduce_type in ["argmax", "argmin"]: + out_tvm_indices = out_tvm.numpy() + if keepdims: + out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis) + if axis is None: + out_tvm_val = in_npy_map.ravel()[out_tvm_indices] + else: + other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis + 1) :])) + sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:] + out_tvm_val = in_npy_map[sel_indices] + if reduce_type == "argmax": + tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1e-3, 1e-3) + elif reduce_type == "argmin": + tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3) + else: + tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3) if __name__ == "__main__": diff --git a/tests/python/contrib/test_hexagon/topi/test_reshape.py b/tests/python/contrib/test_hexagon/topi/test_reshape.py index 38b8a9cf9a82..33bb31902eaa 100644 --- a/tests/python/contrib/test_hexagon/topi/test_reshape.py +++ b/tests/python/contrib/test_hexagon/topi/test_reshape.py @@ -14,9 +14,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
- +"""Test reshape class.""" import numpy as np -import pytest import tvm import tvm.testing @@ -25,6 +24,18 @@ from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target +BATCH_FLATTEN_FP16_TESTS = ( + ([1, 1, 1, 2048], [1, 2048], "nhwc-1024c-2d", "nc-1024-2d", "float16"), + ([1, 2, 4, 2048], [1, 2 * 4 * 2048], "nhwc-1024c-2d", "nc-1024-2d", "float16"), + ([1, 8, 8, 1024], [1, 8 * 8 * 1024], "nhwc-1024c-2d", "nc-1024-2d", "float16"), + ([2, 4, 8, 1024], [2, 4 * 8 * 1024], "nhwc-1024c-2d", "nc-1024-2d", "float16"), +) + +BATCH_FLATTEN_UINT8_TESTS = ( + ([1, 1, 1, 2048], [1, 2048], "nhwc-2048c-2d", "nc-2048-2d", "uint8"), + ([1, 2, 4, 2048], [1, 2 * 4 * 2048], "nhwc-2048c-2d", "nc-2048-2d", "uint8"), +) + def reshape_helper( func, @@ -37,17 +48,18 @@ def reshape_helper( output_layout, hexagon_session, ): + """Reshape helper function.""" - A = te.placeholder(input_shape, name="A", dtype=data_type) + a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=data_type) if func == "reshape": - D = fcompute(A, output_shape) + d_tensor = fcompute(a_tensor, output_shape) elif func == "batch_flatten": - D = fcompute(A) + d_tensor = fcompute(a_tensor) else: raise RuntimeError(f"Unexpected func'{func}'") tir_s = fschedule( - D, - A, + d_tensor, + a_tensor, output_layout, input_layout, ) @@ -87,28 +99,18 @@ def reshape_helper( np.testing.assert_allclose(output.numpy(), ref_np_transformed, atol=1e-07, rtol=0) -batch_flatten_fp16_tests = ( - ([1, 1, 1, 2048], [1, 2048], "nhwc-1024c-2d", "nc-1024-2d", "float16"), - ([1, 2, 4, 2048], [1, 2 * 4 * 2048], "nhwc-1024c-2d", "nc-1024-2d", "float16"), - ([1, 8, 8, 1024], [1, 8 * 8 * 1024], "nhwc-1024c-2d", "nc-1024-2d", "float16"), - ([2, 4, 8, 1024], [2, 4 * 8 * 1024], "nhwc-1024c-2d", "nc-1024-2d", "float16"), -) - - -batch_flatten_uint8_tests = ( - ([1, 1, 1, 2048], [1, 2048], "nhwc-2048c-2d", "nc-2048-2d", "uint8"), - ([1, 2, 4, 2048], [1, 2 * 4 * 2048], "nhwc-2048c-2d", "nc-2048-2d", "uint8"), -) - - class BaseTestBatchFlatten: + """Test batch flatten class.""" + (input_shape, output_shape, input_layout, output_layout, data_type,) = tvm.testing.parameters( - *batch_flatten_fp16_tests, - *batch_flatten_uint8_tests, + *BATCH_FLATTEN_FP16_TESTS, + *BATCH_FLATTEN_UINT8_TESTS, ) class TestBatchFlatten(BaseTestBatchFlatten): + """Test batch flatten class.""" + @tvm.testing.requires_hexagon def test_batch_flatten( self, @@ -119,6 +121,7 @@ def test_batch_flatten( output_layout, hexagon_session, ): + """Test batch flatten.""" reshape_helper( "batch_flatten", sl.batch_flatten_compute, @@ -132,28 +135,30 @@ def test_batch_flatten( ) -reshape_fp16_tests = ( - ([1, 8, 4, 64], [1, 8, 8, 32], "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"), - ([1, 16, 8, 128], [1, 16, 16, 64], "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"), -) - +class BaseTestReshape(BaseTestBatchFlatten): + """Test reshape base class.""" -reshape_uint8_tests = ( - ([1, 8, 8, 128], [1, 8, 16, 64], "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"), - ([1, 16, 64, 128], [1, 16, 128, 64], "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"), -) + reshape_fp16_tests = ( + ([1, 8, 4, 64], [1, 8, 8, 32], "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"), + ([1, 16, 8, 128], [1, 16, 16, 64], "nhwc-8h2w32c2w-2d", "nhwc-8h2w32c2w-2d", "float16"), + ) + reshape_uint8_tests = ( + ([1, 8, 8, 128], [1, 8, 16, 64], "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"), + ([1, 16, 64, 128], [1, 16, 128, 64], "nhwc-8h8w32c-2d", "nhwc-8h8w32c-2d", "uint8"), + ) -class 
BaseTestReshape(BaseTestBatchFlatten): (input_shape, output_shape, input_layout, output_layout, data_type,) = tvm.testing.parameters( - *batch_flatten_fp16_tests, - *batch_flatten_uint8_tests, + *BATCH_FLATTEN_FP16_TESTS, + *BATCH_FLATTEN_UINT8_TESTS, *reshape_fp16_tests, *reshape_uint8_tests, ) class TestReshape(BaseTestReshape): + """Test reshape class.""" + @tvm.testing.requires_hexagon def test_reshape( self, @@ -164,6 +169,7 @@ def test_reshape( output_layout, hexagon_session, ): + """Test reshape.""" reshape_helper( "reshape", sl.reshape_compute, diff --git a/tests/python/contrib/test_hexagon/topi/test_resize2d.py b/tests/python/contrib/test_hexagon/topi/test_resize2d.py old mode 100755 new mode 100644 index 80cfba5c6c9e..44d9c95a2f06 --- a/tests/python/contrib/test_hexagon/topi/test_resize2d.py +++ b/tests/python/contrib/test_hexagon/topi/test_resize2d.py @@ -14,7 +14,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import pytest +"""Resize 2D test. +""" import numpy as np import tvm @@ -24,61 +25,9 @@ from ..infrastructure import allocate_hexagon_array, transform_numpy, get_hexagon_target -@tvm.testing.fixture -def expected_output_np( - input_np, - in_height, - in_width, - out_height, - out_width, - layout, - method, - coord_trans, - dtype, -): - scale_h = out_height / in_height - scale_w = out_width / in_width - - return resize2d_python(input_np, (scale_h, scale_w), layout, method, coord_trans) - - -@tvm.testing.fixture -def input_np(input_shape, dtype): - if dtype == "float16": - return np.random.random(input_shape).astype(dtype) - if dtype == "uint8": - return np.random.randint(0, 255, input_shape).astype(dtype) - if dtype == "int8": - return np.random.randint(-128, 127, input_shape).astype(dtype) - - -@tvm.testing.fixture -def transformed_input_np(input_np, layout, input_crouton_layout, dtype): - if dtype == "float16" or dtype == "uint8" or dtype == "int8": - return transform_numpy(input_np, layout.lower(), input_crouton_layout) - - raise RuntimeError(f"Unsupported data type '{dtype}'") - - -@tvm.testing.fixture -def transformed_expected_output_np(expected_output_np, layout, output_layout, dtype): - if dtype == "float16" or dtype == "uint8" or dtype == "int8": - return transform_numpy(expected_output_np, layout.lower(), output_layout) - - raise RuntimeError(f"Unsupported data type '{dtype}'") - - -@tvm.testing.fixture -def input_shape(batch, channel, in_height, in_width): - return (batch, in_height, in_width, channel) - - -@tvm.testing.fixture -def output_shape(batch, channel, out_height, out_width): - return (batch, out_height, out_width, channel) - - class TestResize2d: + """Test resize 2D class.""" + (batch, channel, in_height, in_width, out_height, out_width,) = tvm.testing.parameters( ( 1, @@ -106,6 +55,56 @@ class TestResize2d: coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel") method = tvm.testing.parameter("nearest_neighbor", "linear") + @tvm.testing.fixture + def expected_output_np( + self, + input_np, + in_height, + in_width, + out_height, + out_width, + layout, + method, + coord_trans, + ): + """Generate expected output.""" + scale_h = out_height / in_height + scale_w = out_width / in_width + + return resize2d_python(input_np, (scale_h, scale_w), layout, method, coord_trans) + + @tvm.testing.fixture + def input_np(self, input_shape, dtype): + if dtype == "float16": + return np.random.random(input_shape).astype(dtype) + if dtype == "uint8": + 
return np.random.randint(0, 255, input_shape).astype(dtype) + if dtype == "int8": + return np.random.randint(-128, 127, input_shape).astype(dtype) + raise RuntimeError(f"dtype {dtype} is not valid.") + + @tvm.testing.fixture + def transformed_input_np(self, input_np, layout, input_crouton_layout, dtype): + if dtype in ["float16", "uint8", "int8"]: + return transform_numpy(input_np, layout.lower(), input_crouton_layout) + + raise RuntimeError(f"Unsupported data type '{dtype}'") + + @tvm.testing.fixture + def transformed_expected_output_np(self, expected_output_np, layout, output_layout, dtype): + if dtype in ["float16", "uint8", "int8"]: + return transform_numpy(expected_output_np, layout.lower(), output_layout) + + raise RuntimeError(f"Unsupported data type '{dtype}'") + + @tvm.testing.fixture + def input_shape(self, batch, channel, in_height, in_width): + return (batch, in_height, in_width, channel) + + @tvm.testing.fixture + def output_shape(self, batch, channel, out_height, out_width): + return (batch, out_height, out_width, channel) + @tvm.testing.requires_hexagon def test_resize2d( self, @@ -123,10 +122,11 @@ def test_resize2d( method, hexagon_session, ): - A = te.placeholder(input_shape, name="A", dtype=dtype) + """Test resize 2D.""" + a_tensor = te.placeholder(input_shape, name="a_tensor", dtype=dtype) - M = s1.resize2d_compute( - A, + m_tensor = s1.resize2d_compute( + a_tensor, [0.0] * 4, (output_shape[1], output_shape[2]), layout=layout, @@ -135,7 +135,9 @@ def test_resize2d( out_dtype=dtype, ) - tir_schedule = s1.tir_resize2d_schedule(M, A, input_crouton_layout, output_layout) + tir_schedule = s1.tir_resize2d_schedule( + m_tensor, a_tensor, input_crouton_layout, output_layout + ) sch = tir_schedule.mod @@ -151,12 +153,12 @@ def test_resize2d( with tvm.transform.PassContext(opt_level=3): func = tvm.build( sch, - [A, M], + [a_tensor, m_tensor], get_hexagon_target("v69"), name="resize2d", ) - A_data_nd = allocate_hexagon_array( + a_data_nd = allocate_hexagon_array( hexagon_session.device, data=transformed_input_np, dtype=dtype, @@ -164,7 +166,7 @@ def test_resize2d( mem_scope="global.vtcm", ) - M_data_nd = allocate_hexagon_array( + m_data_nd = allocate_hexagon_array( hexagon_session.device, transformed_expected_output_np.shape, dtype=dtype, @@ -173,21 +175,25 @@ def test_resize2d( ) mod = hexagon_session.load_module(func) - mod(A_data_nd, M_data_nd) + mod(a_data_nd, m_data_nd) - b, h, w, c = output_shape + batch_size, height, width, channel = output_shape # convert nd to np and reshape to fixed chunk size layout if output_layout == "nhwc-8h2w32c2w-2d": - M_data_np = M_data_nd.numpy().reshape([b, h // 8, w // 4, c // 32, 8, 2, 32, 2]) + m_data_np = m_data_nd.numpy().reshape( + [batch_size, height // 8, width // 4, channel // 32, 8, 2, 32, 2] + ) elif output_layout == "nhwc-8h8w32c-2d": - M_data_np = M_data_nd.numpy().reshape([b, h // 8, w // 8, c // 32, 8, 8, 32]) + m_data_np = m_data_nd.numpy().reshape( + [batch_size, height // 8, width // 8, channel // 32, 8, 8, 32] + ) if dtype == "float16": np.testing.assert_allclose( - transformed_expected_output_np, M_data_np, rtol=1e-3, atol=1e-3 + transformed_expected_output_np, m_data_np, rtol=1e-3, atol=1e-3 ) - elif dtype == "int8" or dtype == "uint8": - np.testing.assert_allclose(transformed_expected_output_np, M_data_np, rtol=1, atol=1) + elif dtype in ["int8", "uint8"]: + np.testing.assert_allclose(transformed_expected_output_np, m_data_np, rtol=1, atol=1) if __name__ == "__main__": diff --git 
a/tests/python/contrib/test_hexagon/topi/test_softmax.py b/tests/python/contrib/test_hexagon/topi/test_softmax.py index 91f348494d6d..e1b4d97bc171 100644 --- a/tests/python/contrib/test_hexagon/topi/test_softmax.py +++ b/tests/python/contrib/test_hexagon/topi/test_softmax.py @@ -28,13 +28,8 @@ from ..infrastructure import get_hexagon_target -dtype = tvm.testing.parameter( - "float16", - "float32", -) - # TODO(mehrdadh): add log_softmax to config -configs = { +OPERATOR_CONFIGS = { "softmax": { "topi": topi.nn.softmax, "ref": tvm.topi.testing.softmax_python, @@ -42,57 +37,69 @@ }, } -# TODO(mehrdadh): larger size like (1, 16, 256, 256) would fail due to TVM_HEXAGON_RPC_BUFF_SIZE_BYTES -shapes = [(32, 10), (3, 4), (1, 16, 32, 32)] -softmax_operation, shape = tvm.testing.parameters( - *[ - (name, shape) - for name, config in configs.items() - for shape in shapes - if len(shape) in config["dimensions"] - ] -) - - -@tvm.testing.requires_hexagon -def test_softmax(hexagon_session: Session, shape, dtype, softmax_operation): - if dtype == "float16": - pytest.xfail("float16 is not supported.") - A = te.placeholder(shape, dtype=dtype, name="A") - - topi_op = configs[softmax_operation]["topi"] - B = topi_op(A, axis=1) - - def get_ref_data(shape): - ref_func = tvm.topi.testing.softmax_python - a_np = np.random.uniform(size=shape).astype(dtype) - - if len(shape) == 2: - b_np = ref_func(a_np) - elif len(shape) == 4: - _, c, h, w = a_np.shape - a_np_2d = a_np.transpose(0, 2, 3, 1).reshape(h * w, c) - b_np_2d = tvm.topi.testing.softmax_python(a_np_2d) - b_np = b_np_2d.reshape(1, h, w, c).transpose(0, 3, 1, 2) - - return a_np, b_np - - # get the test data - a_np, b_np = get_ref_data(shape) - - with tvm.target.Target(get_hexagon_target("v68")): - fschedule = topi.hexagon.schedule_softmax - s = fschedule(B) - - func = tvm.build(s, [A, B], get_hexagon_target("v68"), name="softmax") - mod = hexagon_session.load_module(func) - - dev = hexagon_session.device - a = tvm.nd.array(a_np, dev) - b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev) - mod["softmax"](a, b) - - tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) + +class TestSoftmax: + """Softmax test class.""" + + dtype = tvm.testing.parameter( + "float16", + "float32", + ) + + # TODO(mehrdadh): larger size like (1, 16, 256, 256) + # would fail due to TVM_HEXAGON_RPC_BUFF_SIZE_BYTES + shape = tvm.testing.parameter((32, 10), (3, 4), (1, 16, 32, 32)) + + @tvm.testing.fixture + def softmax_operation(self, shape) -> tuple: + """Returns the operation name and shape.""" + for name, config in OPERATOR_CONFIGS.items(): + if len(shape) in config["dimensions"]: + return name + else: + raise ValueError(f"Shape {shape} is not supported.") + + @tvm.testing.requires_hexagon + def test_softmax(self, hexagon_session: Session, dtype, shape, softmax_operation): + """Test softmax.""" + if dtype == "float16": + pytest.xfail("float16 is not supported.") + + a_tensor = te.placeholder(shape, dtype=dtype, name="a_tensor") + + topi_op = OPERATOR_CONFIGS[softmax_operation]["topi"] + b_tensor = topi_op(a_tensor, axis=1) + + def get_ref_data(shape): + ref_func = tvm.topi.testing.softmax_python + a_np = np.random.uniform(size=shape).astype(dtype) + + if len(shape) == 2: + b_np = ref_func(a_np) + elif len(shape) == 4: + _, c, height, width = a_np.shape + a_np_2d = a_np.transpose(0, 2, 3, 1).reshape(height * width, c) + b_np_2d = tvm.topi.testing.softmax_python(a_np_2d) + b_np = b_np_2d.reshape(1, height, width, c).transpose(0, 3, 1, 2) + + return a_np, b_np + + 
# get the test data + a_np, b_np = get_ref_data(shape) + + with tvm.target.Target(get_hexagon_target("v68")): + fschedule = topi.hexagon.schedule_softmax + s = fschedule(b_tensor) + + func = tvm.build(s, [a_tensor, b_tensor], get_hexagon_target("v68"), name="softmax") + mod = hexagon_session.load_module(func) + + dev = hexagon_session.device + a = tvm.nd.array(a_np, dev) + b = tvm.nd.array(np.zeros(get_const_tuple(b_tensor.shape), dtype=b_tensor.dtype), dev) + mod["softmax"](a, b) + + tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5) if __name__ == "__main__":