diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py index 4268a4876adc..827ad0e2bdd3 100755 --- a/python/tvm/relay/frontend/paddlepaddle.py +++ b/python/tvm/relay/frontend/paddlepaddle.py @@ -304,6 +304,7 @@ def convert_conv2d(g, op, block): kernel = g.get_node(op.input("Filter")[0]) input_x = g.get_node(op.input("Input")[0]) + data_layout = op.attr("data_format") out_channels, _, k_h, k_w = infer_shape(kernel) if padding_algorithm == "VALID": paddings = [0, 0] @@ -332,6 +333,7 @@ def convert_conv2d(g, op, block): groups=groups, channels=out_channels, kernel_size=[k_h, k_w], + data_layout=data_layout, ) g.add_node(op.output("Output")[0], out) @@ -407,6 +409,7 @@ def convert_conv3d(g, op, block): kernel = g.get_node(op.input("Filter")[0]) input_x = g.get_node(op.input("Input")[0]) + data_layout = op.attr("data_format") out_channels, _, k_d, k_h, k_w = infer_shape(kernel) if padding_algorithm == "VALID": paddings = [0, 0, 0] @@ -446,6 +449,7 @@ def convert_conv3d(g, op, block): groups=groups, channels=out_channels, kernel_size=[k_d, k_h, k_w], + data_layout=data_layout, ) g.add_node(op.output("Output")[0], out) @@ -821,7 +825,9 @@ def convert_gaussian_random(g, op, block): std = op.attr("std") shape = op.attr("shape") seed = op.attr("seed") - out = _op.random.normal(key=seed, shape=shape, mean=mean, scale=std) + dtype = op.attr("dtype") + dtype = _convert_dtype_value(dtype) + out = _op.random.normal(key=seed, shape=shape, dtype=dtype, mean=mean, scale=std) g.add_node(op.output("Out")[0], out) @@ -2164,9 +2170,13 @@ def convert_softplus(g, op, block): beta = op.attr("beta") beta = _expr.const(beta, dtype=dtype) threshold = op.attr("threshold") + + if threshold is None: + threshold = 20.0 threshold = _expr.const(threshold, dtype=dtype) out_softplus = _op.log(_op.exp(x * beta) + _expr.const(1.0, dtype=dtype)) / beta out = _op.where(_op.greater(x * beta, threshold), x, out_softplus) + 
g.add_node(op.output("Out")[0], out) diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py index 1555ba1aaaa4..bbe3daeb916c 100755 --- a/tests/python/frontend/paddlepaddle/test_forward.py +++ b/tests/python/frontend/paddlepaddle/test_forward.py @@ -509,6 +509,34 @@ def __init__(self, stride=1, padding=0, dilation=1, groups=1, padding_mode="zero def forward(self, inputs): return self.softmax(self.conv(inputs)) + class Conv2D2(nn.Layer): + def __init__( + self, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode="zeros", + data_format="NCHW", + ): + super(Conv2D2, self).__init__() + self.conv = nn.Conv2D( + 3, + 6, + 3, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + padding_mode=padding_mode, + data_format=data_format, + ) + self.softmax = nn.Softmax() + + @paddle.jit.to_static + def forward(self, inputs): + return self.softmax(self.conv(inputs)) + input_shapes = [[1, 3, 10, 10], [1, 3, 12, 12]] for input_shape in input_shapes: @@ -521,6 +549,10 @@ def forward(self, inputs): input_data=input_data, ) verify_model(Conv2D1(stride=2, padding="SAME", dilation=2, groups=3), input_data=input_data) + verify_model( + Conv2D2(stride=2, padding="SAME", dilation=2, groups=3, data_format="NCHW"), + input_data=input_data, + ) @tvm.testing.uses_gpu @@ -575,6 +607,34 @@ def __init__(self, stride=1, padding=0, dilation=1, groups=1, padding_mode="zero def forward(self, inputs): return self.softmax(self.conv(inputs)) + class Conv3D2(nn.Layer): + def __init__( + self, + stride=1, + padding=0, + dilation=1, + groups=1, + padding_mode="zeros", + data_format="NCDHW", + ): + super(Conv3D2, self).__init__() + self.conv = nn.Conv3D( + 3, + 6, + 3, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + padding_mode=padding_mode, + data_format=data_format, + ) + self.softmax = nn.Softmax() + + @paddle.jit.to_static + def forward(self, inputs): + return 
self.softmax(self.conv(inputs)) + input_shapes = [[1, 3, 10, 10, 10], [1, 3, 12, 12, 12]] for input_shape in input_shapes: @@ -595,6 +655,10 @@ def forward(self, inputs): input_data=input_data, ) verify_model(Conv3D(stride=2, padding="SAME", dilation=2, groups=3), input_data=input_data) + verify_model( + Conv3D2(stride=2, padding="SAME", dilation=2, groups=3, data_format="NCDHW"), + input_data=input_data, + ) @tvm.testing.uses_gpu @@ -1720,11 +1784,31 @@ def test_forward_sin(): pass -@run_math_api +@tvm.testing.uses_gpu def test_forward_softplus(): - x = paddle.to_tensor([-0.4, 1], dtype="float32") - m = paddle.nn.Softplus(5, 1) - verify_model(m, [x]) + @paddle.jit.to_static + def Softplus1(input): + return paddle.nn.functional.softplus(input, beta=1.0, threshold=20.0) + + @paddle.jit.to_static + def Softplus2(input): + return paddle.nn.functional.softplus(input, beta=6.0, threshold=20.0) + + @paddle.jit.to_static + def Softplus3(input): + return paddle.nn.functional.softplus(input, beta=1.0, threshold=10.0) + + x = paddle.to_tensor([-8.0, -12.0, 1.0, 18.0, 25.0]) + verify_model(Softplus1, x) + verify_model(Softplus2, x) + verify_model(Softplus3, x) + + input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]] + for input_shape in input_shapes: + input_data = paddle.randn(shape=input_shape, dtype="float32") + verify_model(Softplus1, input_data=input_data) + verify_model(Softplus2, input_data=input_data) + verify_model(Softplus3, input_data=input_data) @run_math_api