From dcc3e51bc1e73e4aeaeab84e485548e4305d35b8 Mon Sep 17 00:00:00 2001
From: Masahiro Masuda
Date: Wed, 15 Aug 2018 23:54:13 +0900
Subject: [PATCH] bind gpu threads to injective op properly

---
 nnvm/tests/python/compiler/test_op_fusion.py | 34 ++++++++++++++++++++
 topi/python/topi/cuda/conv2d_nchw.py         |  2 +-
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/nnvm/tests/python/compiler/test_op_fusion.py b/nnvm/tests/python/compiler/test_op_fusion.py
index 8d05ae02c579..38e686b54a21 100644
--- a/nnvm/tests/python/compiler/test_op_fusion.py
+++ b/nnvm/tests/python/compiler/test_op_fusion.py
@@ -77,6 +77,39 @@ def test_injective_reduce_injective():
         np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
 
 
+def test_injective_conv2d():
+    channels = 16
+    data = sym.Variable(name="data")
+    pool = sym.global_avg_pool2d(data=data)
+    weight = sym.reshape(pool, shape=[1, channels, 1, 1])
+    residual = sym.conv2d(data=data, kernel_size=(3,3), channels=channels, padding=(1, 1),
+                          layout="NCHW", kernel_layout="OIHW", use_bias=False, name="conv")
+    net = weight * data + residual
+    size = 56
+    dtype="float32"
+    dshape = (1, channels, size, size)
+    kshape = (channels, channels, 3, 3)
+    oshape = dshape
+    shape_dict = {"data": dshape}
+
+    for target, ctx in ctx_list():
+        graph, lib, _ = nnvm.compiler.build(net, target, shape_dict)
+        # data, global_avg_pool, conv weight, fused op
+        assert graph.index.num_nodes == 4
+
+        data = tvm.nd.array(np.random.uniform(size=dshape).astype(dtype))
+        kernel = tvm.nd.array(np.random.uniform(size=kshape).astype(dtype))
+        m = graph_runtime.create(graph, lib, ctx)
+        m.run(data=data, conv_weight=kernel)
+        # get output
+        out = m.get_output(0, tvm.nd.empty(oshape, dtype))
+        residual = topi.testing.conv2d_nchw_python(
+            data.asnumpy(), kernel.asnumpy(), (1,1), 'SAME')
+        weight = np.mean(data.asnumpy(), axis=(2, 3))
+        c_np = weight[:, :, np.newaxis, np.newaxis] * data.asnumpy() + residual
+        np.testing.assert_allclose(out.asnumpy(), c_np, rtol=1e-5)
+
+
 def build_and_run(sym, params, data, out_shape, target, ctx, opt_level=2):
     with nnvm.compiler.build_config(opt_level=opt_level):
         graph, lib, params = nnvm.compiler.build(sym, target, shape={"data":data.shape}, params=params)
@@ -123,3 +156,4 @@ def get_sym(out_channel):
     test_ewise_injective()
     test_conv_ewise_injective()
     test_fuse_conv2d_elu()
+    test_injective_conv2d()
diff --git a/topi/python/topi/cuda/conv2d_nchw.py b/topi/python/topi/cuda/conv2d_nchw.py
index 4f7539d224eb..1957496cd7b8 100644
--- a/topi/python/topi/cuda/conv2d_nchw.py
+++ b/topi/python/topi/cuda/conv2d_nchw.py
@@ -497,7 +497,7 @@ def schedule(temp, Filter, Output):
     def traverse(OP):
         """Traverse operators from computation graph"""
         # inline all one-to-one-mapping operators except the last stage (output)
-        if tag.is_broadcast(OP.tag):
+        if tag.is_injective(OP.tag):
             if OP not in s.outputs:
                 s[OP].compute_inline()
             for tensor in OP.input_tensors:
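
Note on the topi change: the only functional edit is widening the predicate in traverse() from tag.is_broadcast to tag.is_injective. The sketch below is a rough, assumed model of the tag predicates in topi/python/topi/tag.py (the constants and function bodies are illustrative, not copied from the repo); it shows why an op carrying the "injective" tag, such as the fused reshape/elementwise stage exercised by the new test, previously fell through the broadcast check and so was neither inlined nor given a thread binding by the conv2d schedule.

    # Illustrative sketch only; assumed approximation of topi.tag predicates.
    ELEMWISE = "elemwise"
    BROADCAST = "broadcast"
    INJECTIVE = "injective"

    def is_broadcast(tag):
        # matches elementwise/broadcast-style ops only
        return tag in (ELEMWISE, BROADCAST) or \
            tag.startswith(ELEMWISE) or tag.startswith(BROADCAST)

    def is_injective(tag):
        # additionally matches one-to-one ops tagged "injective" (e.g. reshape)
        return is_broadcast(tag) or tag == INJECTIVE

    print(is_broadcast("injective"))   # False -> old check skipped the fused op
    print(is_injective("injective"))   # True  -> new check handles it like broadcast ops

Under this assumption, the widened check routes injective-tagged stages through the same inline-or-schedule path as broadcast ops, which is what "bind gpu threads to injective op properly" refers to; test_injective_conv2d covers the case where such a stage is produced by fusion around the conv2d.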