From 3b615061ed767d53adfe24222e1a8033d7a58215 Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Thu, 11 May 2023 10:43:35 -0700 Subject: [PATCH 1/2] [Frontend] Use f-strings for string formatting, NFC Replace uses of % and .format() with f-strings. --- python/tvm/relay/frontend/caffe.py | 13 +- python/tvm/relay/frontend/caffe2.py | 20 ++- python/tvm/relay/frontend/common.py | 28 ++-- python/tvm/relay/frontend/coreml.py | 22 +-- python/tvm/relay/frontend/darknet.py | 9 +- python/tvm/relay/frontend/keras.py | 95 ++++++------ python/tvm/relay/frontend/mxnet.py | 54 ++++--- .../tvm/relay/frontend/mxnet_qnn_op_utils.py | 4 +- python/tvm/relay/frontend/nnvm_common.py | 2 +- python/tvm/relay/frontend/oneflow.py | 44 +++--- python/tvm/relay/frontend/onnx.py | 136 +++++++++--------- python/tvm/relay/frontend/paddlepaddle.py | 32 ++--- python/tvm/relay/frontend/pytorch.py | 65 ++++----- python/tvm/relay/frontend/qnn_torch.py | 4 +- python/tvm/relay/frontend/tensorflow.py | 36 ++--- python/tvm/relay/frontend/tensorflow2.py | 12 +- python/tvm/relay/frontend/tensorflow2_ops.py | 2 +- python/tvm/relay/frontend/tensorflow_ops.py | 77 +++++----- python/tvm/relay/frontend/tflite.py | 45 +++--- 19 files changed, 337 insertions(+), 363 deletions(-) diff --git a/python/tvm/relay/frontend/caffe.py b/python/tvm/relay/frontend/caffe.py index 2d05049efe50..f3af53efffae 100644 --- a/python/tvm/relay/frontend/caffe.py +++ b/python/tvm/relay/frontend/caffe.py @@ -133,7 +133,7 @@ def convert_eltwise(self, op): out = _op.maximum(out, extra_expr) else: raise tvm.error.OpNotImplemented( - "eltwise_type {} is not supported for frontend Caffe.".format(eltwise_type) + f"eltwise_type {eltwise_type} is not supported for frontend Caffe." ) return out @@ -351,7 +351,7 @@ def convert_conv(self, op): weight_value = np.asarray(weight.data, np.float32) weight_value = np.reshape(weight_value, weight_shape) else: - raise Exception("No weight value of layer {} in caffemodel".format(op.name)) + raise Exception(f"No weight value of layer {op.name} in caffemodel") weight_expr = self.exp_tab.new_const(weight_value, dtype="float32") in_expr = self.exp_tab.get_expr(inputs[0]) @@ -416,8 +416,7 @@ def convert_pooling(self, op): out = _op.nn.avg_pool2d(in_expr, **params) else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Caffe.".format( - pool_type_dict[pool_type] + " pool" + f"Operator {pool_type_dict[pool_type]} pool is not supported for frontend Caffe." ) ) @@ -465,7 +464,7 @@ def convert_innerproduct(self, op): weight_value = np.reshape(weight_value, (params["num_output"], -1)) weight_shape = weight_value.shape else: - raise Exception("No weight value of layer {} in caffemodel".format(op.name)) + raise Exception(f"No weight value of layer {op.name} in caffemodel") weight_expr = self.exp_tab.new_const(weight_value, dtype="float32") @@ -549,7 +548,7 @@ def convert_deconv(self, op): weight_value = np.transpose(weight_value, [1, 0, 2, 3]) else: raise tvm.error.OpAttributeRequired( - "No weight value of layer {} in caffemodel".format(op.name) + f"No weight value of layer {op.name} in caffemodel" ) weight_expr = self.exp_tab.new_const(weight_value, dtype="float32") @@ -670,7 +669,7 @@ def convert_reduction(self, op): out = _op.sum(in_expr, axis=axis) else: raise tvm.error.OpAttributeInvalid( - "reduction method:{} is invalid in Caffe frontend.".format(method) + f"reduction method:{method} is invalid in Caffe frontend." 
) if float(coeff) != 1.0: diff --git a/python/tvm/relay/frontend/caffe2.py b/python/tvm/relay/frontend/caffe2.py index c85c4a63b836..a7fcd0cc1ea7 100644 --- a/python/tvm/relay/frontend/caffe2.py +++ b/python/tvm/relay/frontend/caffe2.py @@ -36,7 +36,7 @@ def _impl(attr): if len(kernel) == 2: return prefix + "2d" + surfix raise tvm.error.OpAttributeUnImplemented( - "Non-2D kernels are not supported for operator {}2d".format(prefix) + f"Non-2D kernels are not supported for operator {prefix}2d" ) return _impl @@ -122,7 +122,7 @@ def get_converter(cls): if hasattr(cls, "_impl"): return getattr(cls, "_impl") raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend Caffe2.".format(cls.__name__) + f"Operator {cls.__name__} is not supported in frontend Caffe2." ) @@ -151,7 +151,7 @@ class Elemwise(Caffe2OpConverter): @classmethod def _impl(cls, inputs, args, params): - assert len(inputs) == 2, "Math op take 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"Math op take 2 inputs, {len(inputs)} given" op_name = cls.name conv_ops = ["conv2d", "conv2d_transpose"] if args.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops): @@ -282,7 +282,7 @@ def _get_axis_from_order_str(order): if order == "NHWC": return 3 raise tvm.error.OpAttributeUnImplemented( - "Order {} is not supported in operator Concat.".format(order) + f"Order {order} is not supported in operator Concat." ) return AttrCvt( @@ -498,9 +498,7 @@ def _get_node(self, blob): if blob in self._nodes: return self._nodes[blob] - assert blob not in self._visited_nodes, "Cyclic dependency in the graph (in {})".format( - blob - ) + assert blob not in self._visited_nodes, f"Cyclic dependency in the graph (in {blob})" self._visited_nodes.add(blob) self._process_op(self._ops[blob]) @@ -531,12 +529,12 @@ def _parse_arg(self, arg): args[a.name] = tuple(getattr(a, f)) for f in ["n"]: if a.HasField(f): - raise NotImplementedError("Field {} is not supported in relay.".format(f)) + raise NotImplementedError(f"Field {f} is not supported in relay.") for f in ["nets"]: if list(getattr(a, f)): - raise NotImplementedError("Field {} is not supported in relay.".format(f)) + raise NotImplementedError(f"Field {f} is not supported in relay.") if a.name not in args: - raise ValueError("Cannot parse attribute: \n{}\n.".format(a)) + raise ValueError(f"Cannot parse attribute: \n{a}\n.") return args def _convert_operator(self, op_type, inputs, args, identity_list=None, convert_map=None): @@ -573,7 +571,7 @@ def _convert_operator(self, op_type, inputs, args, identity_list=None, convert_m func = convert_map[op_type](inputs, args, self._params) else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend Caffe2.".format(op_type) + f"Operator {op_type} is not supported in frontend Caffe2." 
) return func diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index 39e17b27da2a..0433d3b52ebf 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -99,7 +99,7 @@ def get_float(self, key, default=RequiredAttr()): if key in self.attrs: return float(self.attrs[key]) if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_int(self, key, default=RequiredAttr()): @@ -123,7 +123,7 @@ def get_int(self, key, default=RequiredAttr()): return None return int(val) if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_str(self, key, default=RequiredAttr()): @@ -144,7 +144,7 @@ def get_str(self, key, default=RequiredAttr()): if key in self.attrs: return self.attrs[key] if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_int_tuple(self, key, default=RequiredAttr()): @@ -170,7 +170,7 @@ def get_int_tuple(self, key, default=RequiredAttr()): if x ) if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_float_tuple(self, key, default=RequiredAttr()): @@ -193,7 +193,7 @@ def get_float_tuple(self, key, default=RequiredAttr()): tshape = self.attrs[key] return tuple(float(x.strip()) for x in tshape.strip("()[]").split(",")) if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_tuple_tuple_int(self, key, default=RequiredAttr()): @@ -222,7 +222,7 @@ def get_tuple_tuple_int(self, key, default=RequiredAttr()): return tuple(seq) if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_int_list(self, key, default=RequiredAttr()): @@ -244,7 +244,7 @@ def get_int_list(self, key, default=RequiredAttr()): tshape = self.attrs[key] return tuple(int(x.strip()) for x in tshape.strip("[]()").split(",")) if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default def get_bool(self, key, default=RequiredAttr()): @@ -266,7 +266,7 @@ def get_bool(self, key, default=RequiredAttr()): val = self.attrs[key] return val.strip().lower() in ["true", "1", "t", "y", "yes"] if isinstance(default, RequiredAttr): - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return default @@ -292,7 +292,7 @@ def get_relay_op(op_name): if op is not None: break if not op: - raise tvm.error.OpNotImplemented("Unable to map op_name {} to relay".format(op_name)) + raise tvm.error.OpNotImplemented(f"Unable to map op_name {op_name} to relay") return op @@ -307,7 +307,7 @@ def __init__(self): def new_const(self, value, shape=None, dtype="float32", source_name=None): """Construct a new var expr and add to exprs dictionary""" - name = "_param_%d" % 
(self.const_ctr) + name = f"_param_{self.const_ctr}" if hasattr(value, "shape"): shape = value.shape self.const_ctr += 1 @@ -412,7 +412,7 @@ def __call__(self, inputs, attrs, *args): if self._custom_check: func, msg = self._custom_check if not func(attrs): - raise RuntimeError("Check failed: {}".format(msg)) + raise RuntimeError(f"Check failed: {msg}") # get new op_name if isinstance(self._op_name, str): op_name = self._op_name @@ -465,7 +465,7 @@ def _parse_default(self, target): else: k = None # should raise if not isinstance(k, str): - msg = "{} is not a valid target, (name, default) expected.".format(target) + msg = f"{target} is not a valid target, (name, default) expected." raise ValueError(msg) return k, v, t @@ -479,7 +479,7 @@ def _required_attr(self, attr, key): """Wrapper for getting required attributes.""" assert isinstance(attr, dict) if key not in attr: - raise AttributeError("Required attribute {} not found.".format(key)) + raise AttributeError(f"Required attribute {key} not found.") return attr[key] @@ -1035,7 +1035,7 @@ def ensure_scalar_shape(x): return x num_elem = np.prod(x_shape) - assert num_elem == 1, "Cannot squeeze tensor shape {} to scalar form.".format(x_shape) + assert num_elem == 1, f"Cannot squeeze tensor shape {x_shape} to scalar form." return _op.squeeze(x) diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index e515843e5fe2..09cfcb7430ed 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -183,7 +183,7 @@ def _ActivationParams(op, inexpr, etab): beta_expr = etab.new_const(beta) return _op.multiply(_op.log(_op.add(_op.exp(inexpr), beta_expr)), alpha_expr) raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend CoreML.".format(whichActivation) + f"Operator {whichActivation} is not supported in frontend CoreML." ) @@ -231,9 +231,9 @@ def _PoolingLayerParams(op, inexpr, etab): params["padding"] = padding params["ceil_mode"] = True else: - msg = "PoolingPaddingType {} is not supported in operator Pooling." op_name = op.WhichOneof("PoolingPaddingType") - raise tvm.error.OpAttributeUnImplemented(msg.format(op_name)) + msg = f"PoolingPaddingType {op_name} is not supported in operator Pooling." + raise tvm.error.OpAttributeUnImplemented(msg) if op.type == 0: return _op.nn.max_pool2d(inexpr, **params) @@ -302,7 +302,7 @@ def _PaddingLayerParams(op, inexpr, etab): constant = op.constant if constant.value != 0: raise tvm.error.OpAttributeUnImplemented( - "{} is not supported in operator Padding.".format(constant.value) + f"{constant.value} is not supported in operator Padding." ) pad_t = op.paddingAmounts.borderAmounts[0].startEdgeSize pad_l = op.paddingAmounts.borderAmounts[1].startEdgeSize @@ -391,8 +391,8 @@ def _UnaryFunctionLayerParams(op, inexpr, etab): alpha = _expr.const(op.alpha) return _op.maximum(inexpr, alpha) else: - msg = "Unary Op type value {} is not supported in frontend CoreML." - raise tvm.error.OpAttributeUnImplemented(msg.format(op_type)) + msg = f"Unary Op type value {op_type} is not supported in frontend CoreML." + raise tvm.error.OpAttributeUnImplemented(msg) def _ReduceLayerParams(op, inexpr, etab): @@ -408,8 +408,8 @@ def _ReduceLayerParams(op, inexpr, etab): elif axis == op.W: axis = -1 else: - msg = "Reduce axis value {} is not supported in frontend CoreML." - raise tvm.error.OpAttributeUnImplemented(msg.format(axis)) + msg = f"Reduce axis value {axis} is not supported in frontend CoreML."
+ raise tvm.error.OpAttributeUnImplemented(msg) mode = op.mode if mode == op.SUM: @@ -425,8 +425,8 @@ def _ReduceLayerParams(op, inexpr, etab): elif mode == op.ARGMAX: return _op.argmax(inexpr, axis=axis, keepdims=True) else: - msg = "Reduce mode value {} is not supported in frontend CoreML." - raise tvm.error.OpAttributeUnImplemented(msg.format(mode)) + msg = f"Reduce mode value {mode} is not supported in frontend CoreML." + raise tvm.error.OpAttributeUnImplemented(msg) def _ReshapeLayerParams(op, inexpr, etab): @@ -511,7 +511,7 @@ def coreml_op_to_relay(op, inname, outnames, etab): classname = type(op).__name__ if classname not in _convert_map: raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend CoreML.".format(classname) + f"Operator {classname} is not supported in frontend CoreML." ) if isinstance(inname, _base.string_types): insym = etab.get_expr(inname) diff --git a/python/tvm/relay/frontend/darknet.py b/python/tvm/relay/frontend/darknet.py index 363812fd562b..aff3df3b91c5 100644 --- a/python/tvm/relay/frontend/darknet.py +++ b/python/tvm/relay/frontend/darknet.py @@ -34,8 +34,7 @@ def _darknet_not_support(attr, op="relay"): """Raise error if any operation is not supported.""" - err = "{} is not supported in {}.".format(attr, op) - raise NotImplementedError(err) + raise NotImplementedError(f"{attr} is not supported in {op}.") def _get_params_prefix(opname, layer_num): @@ -51,7 +50,7 @@ def _get_params_name(prefix, item): def _get_param_var(params, prefix, item): name = _get_params_name(prefix, item) if name not in params: - raise AttributeError("{} not found in params dict.".format(name)) + raise AttributeError(f"{name} not found in params dict.") return new_var(name, shape=params[name].shape, dtype=params[name].dtype) @@ -688,7 +687,7 @@ def _get_darknet_attrs(self, layer, layer_num): pass else: - err = "Darknet layer type {} is not supported in relay.".format(layer_type) + err = f"Darknet layer type {layer_type} is not supported in relay." raise NotImplementedError(err) return attr @@ -743,7 +742,7 @@ def _get_opname(self, layer): def _new_rnn_state_var(self, state=None, name="rnn"): """Returs a symbol for state""" - sym_name = name + "%d_state" % self._state_ctr[name] + sym_name = name + f"{self._state_ctr[name]}_state" self._state_ctr[name] += 1 return new_var(sym_name, shape=state.shape, dtype=str(state.dtype)) diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py index b820ad586df6..283da87d88b3 100644 --- a/python/tvm/relay/frontend/keras.py +++ b/python/tvm/relay/frontend/keras.py @@ -122,7 +122,7 @@ def _convert_activation( return _op.clip(x, a_min=0.0, a_max=1.0) raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend Keras.".format(act_type) + f"Operator {act_type} is not supported in frontend Keras." ) @@ -136,7 +136,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_s dims = len(input_shape) if isinstance(axis, list): raise tvm.error.OpAttributeUnImplemented( - "Softmax with axes {} is not supported.".format(axis) + f"Softmax with axes {axis} is not supported." ) if data_layout == "NCHW": if axis == -1: @@ -183,7 +183,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_s ) raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend Keras.".format(act_type) + f"Operator {act_type} is not supported in frontend Keras." 
) @@ -199,18 +199,18 @@ def _convert_merge( if isinstance(axes, list): if len(axes) != 2: raise tvm.error.OpAttributeUnImplemented( - "Dot with axes {} is not supported.".format(keras_layer.axes) + f"Dot with axes {keras_layer.axes} is not supported." ) for i, axis in enumerate(axes): if axis not in [1, 2]: raise tvm.error.OpAttributeUnImplemented( - "Dot with axes {} is not supported.".format(keras_layer.axes) + f"Dot with axes {keras_layer.axes} is not supported." ) if axes[i] == 2: inexpr[i] = _op.transpose(inexpr[i], axes=[0, 2, 1]) else: raise tvm.error.OpAttributeUnImplemented( - "Dot with axes {} is not supported.".format(keras_layer.axes) + f"Dot with axes {keras_layer.axes} is not supported." ) ret_dot = _op.nn.batch_matmul(inexpr[0], inexpr[1]) ret = _op.transpose(ret_dot, axes=[0, 2, 1]) @@ -232,7 +232,7 @@ def _convert_merge( ret = ret / _expr.const(len(inexpr), dtype="float32") else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported in frontend Keras.".format(merge_type) + f"Operator {merge_type} is not supported in frontend Keras." ) return ret @@ -268,7 +268,7 @@ def _convert_dense( input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0]) if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1: raise tvm.error.OpAttributeInvalid( - "Input shape {} is not valid for operator Dense.".format(input_shape) + f"Input shape {input_shape} is not valid for operator Dense." ) inexpr = _op.squeeze(inexpr, axis=[0]) out = _op.nn.dense(data=inexpr, **params) @@ -305,10 +305,10 @@ def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=N if is_deconv: kernel_layout = "IOW" msg = ( - "Kernel layout with {} is not supported for operator Convolution1D " - "in frontend Keras." + f"Kernel layout with {data_layout} is not supported for operator Convolution1D " + f"in frontend Keras." ) - raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout)) + raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: if kernel_layout == "IOW": @@ -344,8 +344,9 @@ def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=N pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w) params["padding"] = [pad_w[0], pad_w[1]] else: - msg = "Padding with {} is not supported for operator Convolution3D " "in frontend Keras." - raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding)) + msg = (f"Padding with {keras_layer.padding} is not supported for operator Convolution3D " + f"in frontend Keras.") + raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: out = _op.nn.conv1d_transpose(data=inexpr, **params) @@ -434,8 +435,9 @@ def _convert_convolution(inexpr, keras_layer, etab, data_layout, input_shape=Non pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w) params["padding"] = (pad_t, pad_l, pad_b, pad_r) else: - msg = "Padding with {} is not supported for operator Convolution " "in frontend Keras." - raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding)) + msg = (f"Padding with {keras_layer.padding} is not supported for operator Convolution " + f"in frontend Keras.") + raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: out = _op.nn.conv2d_transpose(data=inexpr, **params) else: @@ -475,10 +477,10 @@ def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=N if is_deconv: kernel_layout = "IODHW" msg = ( - "Kernel layout with {} is not supported for operator Convolution3D " - "in frontend Keras."
+ f"Kernel layout with {data_layout} is not supported for operator Convolution3D " + f"in frontend Keras." ) - raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout)) + raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: kernel_d, kernel_h, kernel_w, n_filters, _ = weight.shape @@ -520,8 +522,9 @@ def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=N pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w) params["padding"] = [pad_d[0], pad_h[0], pad_w[0], pad_d[1], pad_h[1], pad_w[1]] else: - msg = "Padding with {} is not supported for operator Convolution3D " "in frontend Keras." - raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding)) + msg = (f"Padding with {keras_layer.padding} is not supported for operator Convolution3D " + f"in frontend Keras.") + raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: out = _op.nn.conv3d_transpose(data=inexpr, **params) else: @@ -584,10 +587,10 @@ def _convert_separable_convolution(inexpr, keras_layer, etab, data_layout, input params0["padding"] = (pad_t, pad_l, pad_b, pad_r) else: msg = ( - "Padding with {} is not supported for operator Separable " - "Convolution in frontend Keras." + f"Padding with {keras_layer.padding} is not supported for operator Separable " + f"Convolution in frontend Keras." ) - raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding)) + raise tvm.error.OpAttributeUnImplemented(msg) depthconv = _op.nn.conv2d(data=inexpr, **params0) # pointwise conv if kernel_layout == "OIHW": @@ -673,7 +676,7 @@ def _convert_pooling( params["padding"] = [pad_t, pad_l, pad_b, pad_r] else: raise tvm.error.OpAttributeUnImplemented( - "Padding with {} is not supported in operator Pooling.".format(keras_layer.padding) + f"Padding with {keras_layer.padding} is not supported in operator Pooling." ) if pool_type == "MaxPooling2D": return _op.nn.max_pool2d(inexpr, **params) @@ -681,7 +684,7 @@ def _convert_pooling( params["count_include_pad"] = False return _op.nn.avg_pool2d(inexpr, **params) raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Keras.".format(keras_layer) + f"Operator {keras_layer} is not supported for frontend Keras." ) @@ -695,7 +698,7 @@ def _convert_pooling3d( if pool_type not in ["MaxPooling3D", "AveragePooling3D"]: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Keras.".format(keras_layer) + f"Operator {keras_layer} is not supported for frontend Keras." ) pool_d1, pool_d2, pool_d3 = keras_layer.pool_size @@ -719,7 +722,7 @@ def _convert_pooling3d( params["padding"] = [pad_d1[0], pad_d2[0], pad_d3[0], pad_d1[1], pad_d2[1], pad_d3[1]] else: raise tvm.error.OpAttributeUnImplemented( - "Padding with {} is not supported in operator Pooling3D.".format(keras_layer.padding) + f"Padding with {keras_layer.padding} is not supported in operator Pooling3D." ) out = _op.transpose(inexpr, axes=(0, 4, 1, 2, 3)) @@ -745,7 +748,7 @@ def _convert_global_pooling3d( out = _op.nn.global_avg_pool3d(inexpr, **global_pool_params) else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Keras.".format(keras_layer) + f"Operator {keras_layer} is not supported for frontend Keras." 
) return _convert_flatten(out, keras_layer, etab, input_shape, data_layout) @@ -775,7 +778,7 @@ def _convert_upsample( params["method"] = "bilinear" else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Keras.".format(upsample_type) + f"Operator {upsample_type} is not supported for frontend Keras." ) params["layout"] = data_layout out = _op.nn.upsampling(inexpr, **params) @@ -810,7 +813,7 @@ def _convert_cropping( ((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Keras.".format(crop_type) + f"Operator {crop_type} is not supported for frontend Keras." ) int32_max = np.iinfo(np.int32).max return _op.strided_slice( @@ -874,14 +877,15 @@ def _convert_padding( top, bottom = padding[0] left, right = padding[1] else: - msg = 'Value {} in attribute "padding" of operator Padding ' "is not valid." - raise tvm.error.OpAttributeInvalid(msg.format(str(padding))) + msg = (f'Value {str(padding)} in attribute "padding" of operator Padding is ' + f'not valid.') + raise tvm.error.OpAttributeInvalid(msg) else: - msg = 'Value {} in attribute "padding" of operator Padding is ' "not valid." - raise tvm.error.OpAttributeInvalid(msg.format(str(padding))) + msg = f'Value {str(padding)} in attribute "padding" of operator Padding is not valid.' + raise tvm.error.OpAttributeInvalid(msg) else: - msg = "Operator {} is not supported in frontend Keras." - raise tvm.error.OpNotImplemented(msg.format(padding_type)) + msg = f"Operator {padding_type} is not supported in frontend Keras." + raise tvm.error.OpNotImplemented(msg) if data_layout == "NCHW": return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (top, bottom), (left, right))) return _op.nn.pad(data=inexpr, pad_width=((0, 0), (top, bottom), (left, right), (0, 0))) @@ -904,8 +908,8 @@ def _convert_padding3d( h_pad = padding[1] w_pad = padding[2] else: - msg = 'Value {} in attribute "padding" of operator ZeroPadding3D is ' "not valid." - raise tvm.error.OpAttributeInvalid(msg.format(str(padding))) + msg = f'Value {str(padding)} in attribute "padding" of operator ZeroPadding3D is not valid.' + raise tvm.error.OpAttributeInvalid(msg) if data_layout == "NCDHW": out = _op.nn.pad( @@ -1199,9 +1203,7 @@ def _convert_lambda(inexpr, keras_layer, _, data_layout): ): return _convert_l2_normalize(inexpr, keras_layer, data_layout) raise tvm.error.OpNotImplemented( - "Function {} used in Lambda layer is not supported in frontend Keras.".format( - fcode.co_names - ) + f"Function {fcode.co_names} used in Lambda layer is not supported in frontend Keras." ) @@ -1227,9 +1229,8 @@ def _convert_time_distributed(inexpr, keras_layer, etab, data_layout, input_shap inner_layer_op_name = type(keras_layer.layer).__name__ if inner_layer_op_name not in _convert_map: raise tvm.error.OpNotImplemented( - "The inner layer for TimeDistributed {} is not supported for frontend Keras.".format( - inner_layer_op_name - ) + f"The inner layer for TimeDistributed {inner_layer_op_name} is not supported for" + f" frontend Keras." 
) conversion_func = lambda expr: _convert_map[inner_layer_op_name]( @@ -1347,7 +1348,7 @@ def _check_unsupported_layers(model): if missing_ops: raise NotImplementedError( - "The following operators are not implemented: {}".format(missing_ops) + f"The following operators are not implemented: {missing_ops}" ) @@ -1374,7 +1375,7 @@ def keras_op_to_relay(inexpr, keras_layer, outname, etab, data_layout): op_name = type(keras_layer).__name__ if op_name not in _convert_map: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend Keras.".format(op_name) + f"Operator {op_name} is not supported for frontend Keras." ) outs = _convert_map[op_name](inexpr, keras_layer, etab, data_layout) outs = _as_list(outs) @@ -1427,7 +1428,7 @@ def _convert_layer(keras_layer, etab, scope=""): ) if inbound_nodes is None: raise TypeError( - "Unknown layer type or unsupported Keras version : {}".format(keras_layer) + f"Unknown layer type or unsupported Keras version : {keras_layer}" ) outs = [] for node_idx, node in enumerate(inbound_nodes): diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index 7497dcdd8022..f7cf39309b8b 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -105,7 +105,7 @@ def _get_channel_axis(layout, op_name): if layout == "NDHWC": return 4 raise tvm.error.OpAttributeInvalid( - 'Value {} in attribute "layout" of operator {} is not valid.'.format(layout, op_name) + f'Value {layout} in attribute "layout" of operator {op_name} is not valid.' ) @@ -123,7 +123,7 @@ def _stable_softrelu(x): return _stable_softrelu(inputs[0]) if act_type not in _activation_map: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend MXNet.".format(act_type) + f"Operator {act_type} is not supported for frontend MXNet." ) return _activation_map[act_type](inputs[0]) @@ -439,9 +439,7 @@ def _pool3d(new_op, is_avg): return _op.nn.global_avg_pool3d(inputs[0]) return _pool3d(_op.nn.avg_pool3d, True) raise tvm.error.OpNotImplemented( - "Operator {} Pooling is not supported for frontend MXNet.".format( - pool_type.capitalize() - ) + f"Operator {pool_type.capitalize()} Pooling is not supported for frontend MXNet." ) # 2D Pooling if pool_type == "max": @@ -453,7 +451,7 @@ def _pool3d(new_op, is_avg): return _op.nn.global_avg_pool2d(inputs[0]) return _pool2d(_op.nn.avg_pool2d, True) raise tvm.error.OpNotImplemented( - "Operator {} Pooling is not supported for frontend MXNet.".format(pool_type.capitalize()) + f"Operator {pool_type.capitalize()} Pooling is not supported for frontend MXNet." ) @@ -702,7 +700,7 @@ def _mx_leaky_relu(inputs, attrs): half_x = _op.multiply(inputs[0], half) return _op.multiply(half_x, erf_plus_one) raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend MXNet.".format(act_type) + f"Operator {act_type} is not supported for frontend MXNet." ) @@ -810,8 +808,8 @@ def _mx_dot(inputs, attrs): transpose_b = attrs.get_bool("transpose_b", False) if transpose_a is True: - msg = 'Value {} in attribute "transpose_a" of operator dot ' "is not valid." - raise tvm.error.OpAttributeInvalid(msg.format(transpose_a)) + msg = f'Value {transpose_a} in attribute "transpose_a" of operator dot is not valid.'
+ raise tvm.error.OpAttributeInvalid(msg) # When performing dot product we need to properly handle shape of result -> out_shape if rank_a == 1: @@ -860,8 +858,8 @@ def _mx_batch_dot(inputs, attrs): transpose_a = attrs.get_bool("transpose_a", False) transpose_b = attrs.get_bool("transpose_b", False) if transpose_a is True: - msg = 'Value {} in attribute "transpose_a" of operator batch_dot ' "is not valid." - raise tvm.error.OpAttributeInvalid(msg.format(transpose_a)) + msg = f'Value {transpose_a} in attribute "transpose_a" of operator batch_dot is not valid.' + raise tvm.error.OpAttributeInvalid(msg) if transpose_b is False: b = _op.transpose(b, axes=[0, 2, 1]) out = _op.nn.batch_matmul(a, b) @@ -1158,7 +1156,7 @@ def _mx_l2_normalize(inputs, attrs): new_attrs["axis"] = list(range(2, ndim)) else: raise tvm.error.OpAttributeInvalid( - 'Mode "{}" is not supported for operator l2_normalize.'.format(mode) + f'Mode "{mode}" is not supported for operator l2_normalize.' ) new_attrs["eps"] = attrs.get_float("eps", 1e-10) return _op.nn.l2_normalize(inputs[0], **new_attrs) @@ -1575,7 +1573,7 @@ def _mx_cond(inputs, attrs, subgraphs): input_args = [] for i, arg in enumerate(inputs): - var = _expr.var("arg%s" % i, _infer_type(arg).checked_type) + var = _expr.var(f"arg{i}", _infer_type(arg).checked_type) input_args.append(var) cond_args = [input_args[i] for i in cond_input_locs] then_args = [input_args[i] for i in then_input_locs] @@ -1678,7 +1676,7 @@ def _qnn_quantize(inputs, attrs): else: out_dtype = out_type if out_dtype not in {"int8", "uint8"}: - raise ValueError("Unsupported out_dtype: %s" % out_dtype) + raise ValueError(f"Unsupported out_dtype: {out_dtype}") min_calib_range = attrs.get_float("min_calib_range", 0.0) max_calib_range = attrs.get_float("max_calib_range", 0.0) quantized_output, _, _ = quantize_mxnet_min_max( @@ -1703,14 +1701,14 @@ def _qnn_contrib_quantized_fifo_buffer(inputs, attrs, params): def _get_subgraph_op(subgraphs, op_name): - assert len(subgraphs) == 1, "Subgraph should have 1 node but has {}".format(len(subgraphs)) + assert len(subgraphs) == 1, f"Subgraph should have 1 node but has {len(subgraphs)}" subgraph = subgraphs[0] nodes = subgraph["nodes"] assert nodes is not None for node in nodes: if node["op"] == op_name: return node - raise ValueError("Op {} was not found in the subgraph".format(op_name)) + raise ValueError(f"Op {op_name} was not found in the subgraph") def _qnn_conv(inputs, attrs, subgraphs, params): @@ -1721,7 +1719,7 @@ def _has_fused_activation(_attrs, _supported_activations): act_type = subgraph_activation_attrs["act_type"] if act_type not in _supported_activations: raise ValueError( - "Fused activation {} is not supported at " "this time".format(act_type) + f"Fused activation {act_type} is not supported at this time" ) has_fused_activation = True return has_fused_activation @@ -2333,7 +2331,7 @@ def _mx_broadcast_like(inputs, attrs): for axes in ["lhs_axes", "rhs_axes"]: if axes in attrs.attrs: raise tvm.error.OpAttributeUnImplemented( - 'Attribute "{}" is not supported for operator broadcast_like.'.format(axes) + f'Attribute "{axes}" is not supported for operator broadcast_like.' ) return _op.broadcast_to_like(*inputs) @@ -2430,9 +2428,9 @@ def _mx_npx_reshape(inputs, attrs): elif ele == -3: if old_shape[ptr] != 1: raise tvm.error.OpAttributeInvalid( - "Dimension of the original shape " - "that corresponds to -3 must be 1. Received" - " {}".format(old_shape[ptr]) + f"Dimension of the original shape " + f"that corresponds to -3 must be 1. 
Received" + f" {old_shape[ptr]}" ) ptr += 1 elif ele == -4: @@ -2468,7 +2466,7 @@ def _mx_npx_reshape(inputs, attrs): new_shape.append(rhs) ptr += 1 else: - raise tvm.error.OpAttributeInvalid("Shape dimension %d is not supported" % ele) + raise tvm.error.OpAttributeInvalid(f"Shape dimension {ele} is not supported") if reverse: new_shape = new_shape[::-1] return _op.reshape(inputs[0], newshape=new_shape) @@ -2836,9 +2834,9 @@ def _from_mxnet_impl(symbol, shape_dict, dtype_info, params=None, mod=None): unsupported[op_name] += 1 if unsupported: - msg = "\n".join(["{}: {}".format(op_name, cnt) for op_name, cnt in unsupported.items()]) + msg = "\n".join([f"{op_name}: {cnt}" for op_name, cnt in unsupported.items()]) raise tvm.error.OpNotImplemented( - "One or more operators are not supported in frontend MXNet:\n{}".format(msg) + f"One or more operators are not supported in frontend MXNet:\n{msg}" ) for nid, node in enumerate(jnodes): @@ -2874,7 +2872,7 @@ def _from_mxnet_impl(symbol, shape_dict, dtype_info, params=None, mod=None): elif isinstance(res, _expr.Expr): res = [res] else: - raise RuntimeError("unexpected type %s" % type(res)) + raise RuntimeError(f"unexpected type {type(res)}") node_map[nid] = res outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) @@ -2892,7 +2890,7 @@ def _update_shape_dtype(shape, dtype, params): if isinstance(dtype, str): for k, v in params.items(): if v.dtype != dtype: - raise ValueError("%s: dtype not expected %s vs %s" % (k, dtype, v.dtype)) + raise ValueError(f"{k}: dtype not expected {dtype} vs {v.dtype}") else: dtype = dtype.copy() dtype.update({k: str(v.dtype) for k, v in params.items()}) @@ -2930,7 +2928,7 @@ def from_mxnet(symbol, shape=None, dtype="float32", arg_params=None, aux_params= try: import mxnet as mx # pylint: disable=import-outside-toplevel except ImportError as e: - raise ImportError("{}. MXNet is required to parse symbols.".format(e)) + raise ImportError(f"{e}. 
MXNet is required to parse symbols.") mod = IRModule() if isinstance(symbol, mx.sym.Symbol): @@ -2960,7 +2958,7 @@ def from_mxnet(symbol, shape=None, dtype="float32", arg_params=None, aux_params= elif isinstance(symbol, mx.gluon.Block): raise NotImplementedError("Only Hybrid Blocks are supported now.") else: - msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol)) + msg = f"mxnet.Symbol or gluon.HybridBlock expected, got {type(symbol)}" raise ValueError(msg) mod["main"] = func return mod, params diff --git a/python/tvm/relay/frontend/mxnet_qnn_op_utils.py b/python/tvm/relay/frontend/mxnet_qnn_op_utils.py index fd0d4c1c8f8c..1b2cf2ef4c83 100644 --- a/python/tvm/relay/frontend/mxnet_qnn_op_utils.py +++ b/python/tvm/relay/frontend/mxnet_qnn_op_utils.py @@ -313,7 +313,7 @@ def quantize_mxnet_min_max(data, min_range, max_range, out_dtype="int8"): elif out_dtype == "int8": return _quantize_mkldnn_min_max_int8(data, min_range, max_range) else: - raise ValueError("Expected out_dtype to be int8 or uint8 but was %s" % out_dtype) + raise ValueError(f"Expected out_dtype to be int8 or uint8 but was {out_dtype}") def _dequantize_zero_centered(data, data_min, data_max, quantized_range): @@ -442,4 +442,4 @@ def dequantize_mxnet_min_max(data, min_range, max_range, in_dtype="int8"): elif in_dtype == "int8": return _dequantize_mkldnn_min_max_int8(data, min_range, max_range) else: - raise ValueError("Expected out_dtype to be int8 or uint8 but was %s" % in_dtype) + raise ValueError(f"Expected out_dtype to be int8 or uint8 but was {in_dtype}") diff --git a/python/tvm/relay/frontend/nnvm_common.py b/python/tvm/relay/frontend/nnvm_common.py index caf3729c3d1f..4a611e0537cd 100644 --- a/python/tvm/relay/frontend/nnvm_common.py +++ b/python/tvm/relay/frontend/nnvm_common.py @@ -27,7 +27,7 @@ def _warn_not_used(attr, op="nnvm"): - err = "{} is ignored in {}.".format(attr, op) + err = f"{attr} is ignored in {op}." warnings.warn(err) diff --git a/python/tvm/relay/frontend/oneflow.py b/python/tvm/relay/frontend/oneflow.py index 500725364971..5e63eebecbeb 100644 --- a/python/tvm/relay/frontend/oneflow.py +++ b/python/tvm/relay/frontend/oneflow.py @@ -87,7 +87,7 @@ def get_node_info(node): if dtype in list(FLOW_2_STR_DTYPE.keys()): data_type = FLOW_2_STR_DTYPE[dtype] else: - raise IndexError("Please check the data type of your node: %s" % node.name) + raise IndexError(f"Please check the data type of your node: {node.name}") return shape, data_type @@ -184,9 +184,9 @@ def get_converter(cls): converter, which should be `_impl_vx`.
""" version = 1 - if hasattr(cls, "_impl_v{}".format(version)): - return getattr(cls, "_impl_v{}".format(version)) - raise NotImplementedError("version {} of {} not implemented".format(version, cls.__name__)) + if hasattr(cls, f"_impl_v{version}"): + return getattr(cls, f"_impl_v{version}") + raise NotImplementedError(f"version {version} of {cls.__name__} not implemented") class Pool(OneFlowOpConverter): @@ -523,7 +523,7 @@ class MatMul(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"MatMul op take 2 inputs, {len(inputs)} given" dtype = infer_type(inputs[0]).checked_type.dtype # Y = alpha * A * B @@ -678,9 +678,7 @@ class Square(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 1, "Square op {} take 1 inputs, {} given".format( - cls.name, len(inputs) - ) + assert len(inputs) == 1, f"Square op {cls.name} take 1 inputs, {len(inputs)} given" return _op.multiply(inputs[0], inputs[0]) @@ -691,7 +689,7 @@ class Add(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs)) + assert len(inputs) == 2, f"Math op {cls.name} take 2 inputs, {len(inputs)} given" axis = int(attrs.get("axis", 0)) true_names = ["weight", "bias"] @@ -767,7 +765,7 @@ class BroadcastMath(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs)) + assert len(inputs) == 2, f"Math op {cls.name} take 2 inputs, {len(inputs)} given" beta_names = ["weight", "bias", "mean", "var", "Constant"] for i in inputs: @@ -878,9 +876,7 @@ class Unary(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format( - cls.name, len(inputs) - ) + assert len(inputs) == 1, f"Unary math op {cls.name} takes 1 input, {len(inputs)} given" return get_relay_op(cls.name)(*inputs) @@ -908,7 +904,7 @@ class ScalarAdd(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 1, "add_scalar take == 1 inputs, but {} given.".format(len(inputs)) + assert len(inputs) == 1, f"add_scalar take == 1 inputs, but {len(inputs)} given." if attrs.get("has_int_operand", True): res = inputs[0] + _expr.const(attrs["int_operand"]) @@ -927,7 +923,7 @@ class ScalarMul(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 1, "add_scalar take == 1 inputs, but {} given.".format(len(inputs)) + assert len(inputs) == 1, f"mul_scalar take == 1 inputs, but {len(inputs)} given." if attrs.get("has_int_operand", True): res = inputs[0] * _expr.const(attrs["int_operand"], dtype="float32") @@ -946,7 +942,7 @@ class ScalarDiv(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 1, "div_scalar take == 1 inputs, but {} given.".format(len(inputs)) + assert len(inputs) == 1, f"div_scalar take == 1 inputs, but {len(inputs)} given." 
if attrs.get("has_int_operand", True): res = inputs[0] / _expr.const(attrs["int_operand"], dtype="float32") @@ -1068,7 +1064,7 @@ class PReLU(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): - assert len(inputs) == 2, "PReLU need 2 inputs, but {} given".format(len(inputs)) + assert len(inputs) == 2, f"PReLU need 2 inputs, but {len(inputs)} given" for i in inputs: if "_input." in str(i): prelu_a = i @@ -1365,7 +1361,7 @@ class Range(OneFlowOpConverter): @classmethod def _impl_v1(cls, inputs, attrs, params): if len(inputs) != 0: - raise ValueError("Expect no inputs but get {}".format(len(inputs))) + raise ValueError(f"Expect no inputs but get {len(inputs)}") start = attrs.get("start", 0.0) limit = attrs.get("limit", 1.0) delta = attrs.get("delta", 1.0) @@ -1558,7 +1554,7 @@ def deal_with_input_convert( op_replace = copy.deepcopy(_nodes[node_replace]) _nodes[node_input] = op_replace else: - print("{} will not be in _nodes".format(node_input)) + print(f"{node_input} will not be in _nodes") def deal_parameter_convert( @@ -1664,7 +1660,7 @@ def __init__(self, shape, dtype, nodes, model_dir_path): self._dtype[node.name] = dtype self._init_variable_node.append(node.name) if self._init_variable_node != []: - print("{} should be defined by user".format(self._init_variable_node)) + print(f"{self._init_variable_node} should be defined by user") def _parse_input(self, node, model_dir_path): input_user_conf_list = [] @@ -1785,9 +1781,7 @@ def from_oneflow(self, nodes, model_dir_path): assert ( len(node_outputs) == outputs_num - ), "Number of output mismatch {} vs {} in {}.".format( - len(node_outputs), outputs_num, op_name - ) + ), f"Number of output mismatch {len(node_outputs)} vs {outputs_num} in {op_name}." if outputs_num == 1: op = fold_constant(op) else: @@ -1837,7 +1831,7 @@ def from_oneflow(self, nodes, model_dir_path): if input_name in self._inputs: self._sort_inputs[input_name] = self._inputs[input_name] else: - raise IndexError("{} is not in self._inputs".format(input_name)) + raise IndexError(f"{input_name} is not in self._inputs") # step 6: create a function from our output expression and all input variables. 
func = _function.Function([v for _, v in self._sort_inputs.items()], outputs) @@ -1866,7 +1860,7 @@ def _convert_operator(self, op_name, node_inputs, op_attr): elif op_name in convert_map: sym = convert_map[op_name](node_inputs, op_attr, self._params) else: - raise NotImplementedError("Operator {} not implemented.".format(op_name)) + raise NotImplementedError(f"Operator {op_name} not implemented.") return sym diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 05e86dbe004b..031158b666bf 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -85,7 +85,7 @@ def __getitem__(self, item): return [self[i] for i in indices] if isinstance(item, int): return list(self)[item] if item < len(self) else None - raise TypeError("list indices must be integers or slices, not %s" % type(item).__name__) + raise TypeError(f"list indices must be integers or slices, not {type(item).__name__}") def get_numpy(tensor_proto): @@ -93,7 +93,7 @@ def get_numpy(tensor_proto): try: from onnx.numpy_helper import to_array except ImportError as e: - raise ImportError("Unable to import onnx which is required {}".format(e)) + raise ImportError(f"Unable to import onnx which is required {e}") return to_array(tensor_proto) @@ -107,12 +107,12 @@ def get_type(elem_type): try: from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE except ImportError as e: - raise ImportError("Unable to import onnx which is required {}".format(e)) + raise ImportError(f"Unable to import onnx which is required {e}") try: from onnx import TensorProto except ImportError as e: - raise ImportError("Unable to import TensorProto from onnx {}".format(e)) + raise ImportError(f"Unable to import TensorProto from onnx {e}") # Onnx mapping converts bfloat16 to float16 because # numpy does not have a bfloat16 data type. However, @@ -155,9 +155,9 @@ def _impl(attr): return prefix + "2d" + suffix if len(kernel) == 3: return prefix + "3d" + suffix - msg = "Only 1D, 2D, and 3D kernels are supported for operator {}." op_name = prefix + "1d/2d/3d" - raise tvm.error.OpAttributeInvalid(msg.format(op_name)) + msg = f"Only 1D, 2D, and 3D kernels are supported for operator {op_name}." + raise tvm.error.OpAttributeInvalid(msg) return _impl @@ -194,8 +194,8 @@ def onnx_default_layout(dims, op_name): if dims == 3: return "NCDHW" - msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}." - raise tvm.error.OpAttributeInvalid(msg.format(op_name)) + msg = f"Only 1D, 2D and 3D layouts are currently supported for operator {op_name}." + raise tvm.error.OpAttributeInvalid(msg) def onnx_storage_order2layout(storage_order, dims, op_name): @@ -210,8 +210,8 @@ def onnx_storage_order2layout(storage_order, dims, op_name): if dims == 3: return "NCDHW" if storage_order == 0 else "NDHWC" - msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}." - raise tvm.error.OpAttributeInvalid(msg.format(op_name)) + msg = f"Only 1D, 2D and 3D layouts are currently supported for operator {op_name}." 
+ raise tvm.error.OpAttributeInvalid(msg) def dimension_constraint(): @@ -567,7 +567,7 @@ def get_source_name(node, type_dict): op_idx = type_dict[node.op_type] + 1 type_dict[node.op_type] = op_idx # rewrite name property in case any revisiting occurs to current node - node.name = "{}_{}".format(node.op_type, str(op_idx)) + node.name = f"{node.op_type}_{op_idx}" return node.name @@ -608,10 +608,10 @@ def get_converter(cls, opset): versions = [int(d.replace("_impl_v", "")) for d in dir(cls) if "_impl_v" in d] versions = sorted(versions + [opset]) version = versions[max([i for i, v in enumerate(versions) if v == opset]) - 1] - if hasattr(cls, "_impl_v{}".format(version)): - return getattr(cls, "_impl_v{}".format(version)) + if hasattr(cls, f"_impl_v{version}"): + return getattr(cls, f"_impl_v{version}") raise NotImplementedError( - "opset version {} of {} not implemented".format(version, cls.__name__) + f"opset version {version} of {cls.__name__} not implemented" ) @@ -622,9 +622,7 @@ class Unary(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format( - cls.name, len(inputs) - ) + assert len(inputs) == 1, f"Unary math op {cls.name} takes 1 input, {len(inputs)} given" op_name = cls.name return get_relay_op(op_name)(*inputs) @@ -636,7 +634,7 @@ class Elemwise(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs)) + assert len(inputs) == 2, f"Math op {cls.name} take 2 inputs, {len(inputs)} given" op_name = cls.name conv_ops = ["conv2d", "conv2d_transpose"] if attr.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops): @@ -705,8 +703,9 @@ def _run_calculation(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.' - raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], cls.name)) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator {cls.name} ' + f"is invalid.") + raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") if "storage_order" in attr: @@ -835,8 +834,9 @@ def _impl_v1(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' - raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f"is invalid.") + raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") attr["channels"] = kernel_shapes[0][0] @@ -921,8 +921,9 @@ def _impl_v1(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' - raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f'is invalid.') + raise tvm.error.OpAttributeInvalid(msg) if "auto_pad" in attr: attr.pop("auto_pad") @@ -1001,8 +1002,9 @@ def _impl_v11(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' 
- raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f'is invalid.') + raise tvm.error.OpAttributeInvalid(msg) if "auto_pad" in attr: attr.pop("auto_pad") @@ -1843,9 +1845,7 @@ class Gemm(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 3 or len(inputs) == 2, "Gemm op take 2 or 3 inputs, {} given".format( - len(inputs) - ) + assert len(inputs) == 3 or len(inputs) == 2, f"Gemm op take 2 or 3 inputs, {len(inputs)} given" input0_state = infer_type(inputs[0]) dtype = input0_state.checked_type.dtype # Y = alpha * A * B + beta * C @@ -1877,7 +1877,7 @@ class MatMul(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"MatMul op take 2 inputs, {len(inputs)} given" # Need to check input shape as batch matmul must be supported. return matmul_out_dtype(inputs, out_dtype=infer_type(inputs[0]).checked_type.dtype) @@ -1887,7 +1887,7 @@ class MatMulInteger16(OnnxOpConverter): @classmethod def _impl_v10(cls, inputs, attr, params): - assert len(inputs) == 2, "MatMulInteger16 op take 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"MatMulInteger16 op take 2 inputs, {len(inputs)} given" a_dtype = infer_type(inputs[0]).checked_type.dtype b_dtype = infer_type(inputs[1]).checked_type.dtype # Check input data types @@ -1904,7 +1904,7 @@ class Mod(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 2, "Mod op take 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"Mod op take 2 inputs, {len(inputs)} given" # Note: attr['fmod'] determines whether the operator should behave like np.fmod or np.mod. # attr['fmod'] == 0 will behave as np.mod and attr['fmod'] == 1 will force fmod treatment. @@ -2007,8 +2007,9 @@ def _impl_v1(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.' - raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], "LpPool")) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator LpPool ' + f'is invalid.') + raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") if "storage_order" in attr: @@ -2163,7 +2164,7 @@ class Prelu(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"Prelu need 2 inputs, {len(inputs)} given" input_shape = shape_of(inputs[0]) alpha = _op.broadcast_to_like(inputs[1], inputs[0]) alpha = _op.reshape(alpha, [-1]) @@ -2397,7 +2398,7 @@ def _impl_v9(cls, inputs, attr, params): if not scales: # Here we are going to higher OPSET version. - assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"Upsample op takes 2 inputs, {len(inputs)} given" if get_name(inputs[1]) in params: scales = params[inputs[1].name_hint].numpy() @@ -2415,7 +2416,7 @@ def _impl_v9(cls, inputs, attr, params): method = "trilinear" if dims == 5 else "bilinear" else: raise tvm.error.OpAttributeInvalid( - 'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode) + f'Value {mode} in attribute "mode" of operator Upsample is not valid.' 
) # in 3d case, we use the purely static op @@ -2510,7 +2511,7 @@ def _impl_v6(cls, inputs, attr, params): try: from onnx import TensorProto except ImportError as e: - raise ImportError("Unable to import TensorProto from onnx {}".format(e)) + raise ImportError(f"Unable to import TensorProto from onnx {e}") # If onnx mapping is used, bfloat16 gets converted to float16 # which is not the desired behavior @@ -2522,7 +2523,7 @@ def _impl_v6(cls, inputs, attr, params): attr["to"] = str(TENSOR_TYPE_TO_NP_TYPE[attr["to"]]) except ImportError as e: - raise ImportError("Unable to import onnx.mapping which is required {}".format(e)) + raise ImportError(f"Unable to import onnx.mapping which is required {e}") return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr) @@ -2851,9 +2852,7 @@ class Scatter(OnnxOpConverter): @classmethod def _args_check(cls, inputs, attr): - assert len(inputs) == 3, "Scatter takes 3 inputs (data, indices, updates), {} given".format( - len(inputs) - ) + assert len(inputs) == 3, f"Scatter takes 3 inputs (data, indices, updates), {len(inputs)} given" assert infer_type(inputs[1]).checked_type.dtype in ["int32", "int64"] data_rank = len(infer_shape(inputs[0])) @@ -2889,7 +2888,7 @@ def _args_check(cls, inputs, attr, red_valids=None): ret = [] assert ( len(inputs) == 3 - ), "ScatterElements takes 3 inputs (data, indices, updates), {} given".format(len(inputs)) + ), f"ScatterElements takes 3 inputs (data, indices, updates), {len(inputs)} given" assert infer_type(inputs[1]).checked_type.dtype in ["int32", "int64"] axis = attr.get("axis", 0) @@ -2903,9 +2902,7 @@ def _args_check(cls, inputs, attr, red_valids=None): if reduction is None: reduction = b"update" reduction = reduction.decode("utf-8") - assert reduction in red_valids, "Only {} modes are supported, but {} is gotten".format( - red_valids, reduction - ) + assert reduction in red_valids, f"Only {red_valids} modes are supported, but {reduction} is gotten" ret.append(reduction) return ret @@ -2936,7 +2933,7 @@ class ScatterND(OnnxOpConverter): def _inputs_check(cls, inputs): assert ( len(inputs) == 3 - ), "ScatterND takes 3 inputs (data, indices, updates), {} given".format(len(inputs)) + ), f"ScatterND takes 3 inputs (data, indices, updates), {len(inputs)} given" assert infer_type(inputs[1]).checked_type.dtype == "int64" data_rank = len(infer_shape(inputs[0])) @@ -2956,9 +2953,7 @@ def _reduction_check(cls, attr, red_valids=None): reduction = reduction.decode("utf-8") if red_valids is None: red_valids = ["update"] - assert reduction in red_valids, "Only {} reductions are supported, but {} is gotten".format( - red_valids, reduction - ) + assert reduction in red_valids, f"Only {red_valids} reductions are supported, but {reduction} is gotten" return reduction @@ -3680,9 +3675,8 @@ def _get_activations(cls, attr, multiplier, num_directions, rnn_type): activations = attr["activations"] if len(activations) != multiplier * num_directions: raise NotImplementedError( - "{} assumes {} * num_directions activation functions are provided".format( - rnn_type, multiplier - ) + f"{rnn_type} assumes {multiplier} * num_directions activation functions " + f"are provided" ) alpha_loc = 0 alphas = attr.get("activation_alpha", []) @@ -4044,7 +4038,7 @@ def _impl_v10(cls, inputs, attr, params): method = "cubic" else: raise tvm.error.OpAttributeInvalid( - 'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode) + f'Value {mode} in attribute "mode" of operator Resize is not valid.' 
) scale = inputs[1] @@ -4119,7 +4113,7 @@ def v11_13_common(cls, inputs, size, attr, params): method = "cubic" else: raise tvm.error.OpAttributeInvalid( - 'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode) + f'Value {mode} in attribute "mode" of operator Resize is not valid.' ) coord_trans = attr.get("coordinate_transformation_mode", b"half_pixel").decode("ascii") @@ -4299,7 +4293,7 @@ class MaxRoiPool(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 2, "MMaxRoiPool op take 2 inputs, {} given".format(len(inputs)) + assert len(inputs) == 2, f"MMaxRoiPool op take 2 inputs, {len(inputs)} given" data = inputs[0] rois = inputs[1] @@ -5074,7 +5068,7 @@ def _op_dispatch(cls, operator, inputs, attr, params): "reshape": cls._reshape, "embedding_bag": cls._embedding_bag, } - assert operator in op_map, "Operator %s is not supported." % operator + assert operator in op_map, f"Operator {operator} is not supported." return op_map[operator](inputs, attr, params) @classmethod @@ -5277,8 +5271,9 @@ def _impl_v10(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' - raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f'is invalid.') + raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") out_channels = kernel_shapes[0][0] @@ -5631,7 +5626,7 @@ def _impl_v10(cls, inputs, attr, params): ), "MatMulInteger: input dtype doesn't match zero point dtype" elif len(inputs) != 2: raise AssertionError( - "MatMulInteger op takes 2 or 4 inputs, {} given".format(len(inputs)) + f"MatMulInteger op takes 2 or 4 inputs, {len(inputs)} given" ) inputs = [ @@ -5804,8 +5799,9 @@ def _impl_v10(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.' - raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"])) + msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f'is invalid.') + raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") out_channels = kernel_shape[0] @@ -5842,14 +5838,14 @@ class BitwiseBase(OnnxOpConverter): @classmethod def check_inputs(cls, inputs, num=2, use_int=True): - assert len(inputs) == num, "{} takes {} inputs, {} given".format(cls.name, num, len(inputs)) + assert len(inputs) == num, f"{cls.name} takes {num} inputs, {len(inputs)} given" valid_types = ["uint8", "uint16", "uint32", "uint64"] if use_int: valid_types += ["int8", "int16", "int32", "int64"] for i in range(num): in_dtype = infer_type(inputs[i]).checked_type.dtype - assert in_dtype in valid_types, "Wrong dtype of the {}-th input: {}".format(i, in_dtype) + assert in_dtype in valid_types, f"Wrong dtype of the {i}-th input: {in_dtype}" class BitShift(BitwiseBase): @@ -7118,9 +7114,7 @@ def _construct_nodes(self, graph): node_output = [output for output in node_output if output != ""] assert ( len(node_output) == outputs_num - ), "Number of output mismatch {} vs {} in {}.".format( - len(node_output), outputs_num, op_name - ) + ), f"Number of output mismatch {len(node_output)} vs {outputs_num} in {op_name}." 
if outputs_num == 1: self._nodes[node_output[0]] = op @@ -7173,9 +7167,9 @@ def _parse_attr(self, attr_proto): attrs[a.name] = tuple(getattr(a, f)) for f in ["graphs"]: if list(getattr(a, f)): - raise NotImplementedError("Field {} is not supported in relay.".format(f)) + raise NotImplementedError(f"Field {f} is not supported in relay.") if a.name not in attrs: - raise ValueError("Cannot parse attribute: \n{}\n.".format(a)) + raise ValueError(f"Cannot parse attribute: \n{a}\n.") return attrs def _convert_operator(self, op_name, inputs, attrs, opset): @@ -7205,7 +7199,7 @@ def _convert_operator(self, op_name, inputs, attrs, opset): elif op_name in convert_map: sym = convert_map[op_name](inputs, attrs, self._params) else: - raise NotImplementedError("Operator {} not implemented.".format(op_name)) + raise NotImplementedError(f"Operator {op_name} not implemented.") return sym def _fix_outputs(self, op_name, outputs): @@ -7231,7 +7225,7 @@ def export_model(location, graph): os.makedirs(location) time_stamp = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S") model = helper.make_model(graph) - save(model, os.path.join(location, "tvm_exported_model_{}.onnx".format(time_stamp))) + save(model, os.path.join(location, f"tvm_exported_model_{time_stamp}.onnx")) def from_onnx( diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py index adff28187e5b..4aa06388fb73 100755 --- a/python/tvm/relay/frontend/paddlepaddle.py +++ b/python/tvm/relay/frontend/paddlepaddle.py @@ -82,7 +82,7 @@ def _convert_dtype_value(val): 0: "bool", } if val not in convert_dtype_map: - msg = "Paddle data type value %d is not handled yet." % (val) + msg = f"Paddle data type value {val} is not handled yet." raise NotImplementedError(msg) return convert_dtype_map[val] @@ -324,8 +324,8 @@ def convert_conv2d(g, op, block): elif len(paddings) == 4: paddings = [paddings[0], paddings[2], paddings[1], paddings[3]] else: - msg = 'Value {} in attribute "padding" of operator Conv is not "valid."' - raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm)) + msg = f'Value {padding_algorithm} in attribute "padding" of operator Conv is not "valid."' + raise tvm.error.OpAttributeInvalid(msg) out = _op.nn.conv2d( input_x, @@ -383,8 +383,8 @@ def convert_conv2d_transpose(g, op, block): elif len(paddings) == 4: paddings = [paddings[0], paddings[2], paddings[1], paddings[3]] else: - msg = 'Value {} in attribute "padding" of operator Conv is not "valid."' - raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm)) + msg = f'Value {padding_algorithm} in attribute "padding" of operator Conv is not "valid."' + raise tvm.error.OpAttributeInvalid(msg) out = _op.nn.conv2d_transpose( input_x, @@ -438,8 +438,8 @@ def convert_conv3d(g, op, block): paddings[5], ] else: - msg = 'Value {} in attribute "padding" of operator Conv is not "valid."' - raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm)) + msg = f'Value {padding_algorithm} in attribute "padding" of operator Conv is not "valid."' + raise tvm.error.OpAttributeInvalid(msg) out = _op.nn.conv3d( input_x, @@ -957,8 +957,8 @@ def get_interpolate_mode(op): else: coordinate_transformation_mode = "half_pixel" else: - msg = "interp_method {} is not supported for PaddlePaddle's interpolate" - raise tvm.error.OpAttributeInvalid(msg.format(interp_method)) + msg = f"interp_method {interp_method} is not supported for PaddlePaddle's interpolate" + raise tvm.error.OpAttributeInvalid(msg) return rounding_method, interp_method,
coordinate_transformation_mode layout = op.attr("data_layout") @@ -1480,8 +1480,8 @@ def convert_pool2d(g, op, block): elif len(paddings) == 4: paddings = [paddings[0], paddings[2], paddings[1], paddings[3]] else: - msg = 'Value {} in attribute "padding" of operator Pool2d is not "valid."' - raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm)) + msg = f'Value {padding_algorithm} in attribute "padding" of operator Pool2d is not "valid."' + raise tvm.error.OpAttributeInvalid(msg) # handle with special case # while kernel size less than input size @@ -2331,8 +2331,8 @@ def convert_tile(g, op, block): infered = True if not infered: - msg = 'Value {} in attribute "repeat_times" of operator Tile is not "valid."' - raise tvm.error.OpAttributeInvalid(msg.format(reps)) + msg = f'Value {reps} in attribute "repeat_times" of operator Tile is not "valid."' + raise tvm.error.OpAttributeInvalid(msg) op_func = get_relay_op(op.type) out = op_func(x, reps=reps) @@ -2670,10 +2670,8 @@ def check_input_shape(self, op, block): ipt_shape = block.var(ipt_name).shape for i in ipt_shape: if i < 0: - warning_msg = "Input {}(shape={}) has unkown dimension shapes. \ - Specifying static values may improve performance".format( - ipt_name, ipt_shape - ) + warning_msg = (f"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. " + f"Specifying static values may improve performance") warnings.warn(warning_msg) def check_unsupported_ops(self, program): diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py index 5e2e6a5f5e2c..328f075725bc 100644 --- a/python/tvm/relay/frontend/pytorch.py +++ b/python/tvm/relay/frontend/pytorch.py @@ -338,7 +338,7 @@ def square(self, inputs, input_types): def lerp(self, inputs, input_types): if len(inputs) != 3: - msg = "Wrong number of arguments (%d) to parse." % (len(inputs)) + msg = f"Wrong number of arguments ({len(inputs)}) to parse." raise AssertionError(msg) start = inputs[0] @@ -388,7 +388,7 @@ def _get_type(val, inp_type): stop = _get_value(inputs[1 if len(inputs) > 5 else 0], dtype) step = _get_value(inputs[2], dtype) if len(inputs) > 6 else _expr.const(1, dtype) else: - msg = "Unknown number of arguments (%d) to parse." % (len(inputs)) + msg = f"Unknown number of arguments ({len(inputs)}) to parse."
raise AssertionError(msg) return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype) @@ -585,9 +585,8 @@ def tensor_split(self, inputs, input_types): import torch if not isinstance(inputs[1], (int, list, tuple, torch.Tensor)): - msg = "indices_or_sections type %s could not be parsed in tensor_split op" % ( - type(inputs[1]) - ) + msg = (f"indices_or_sections type {type(inputs[1])} could not be parsed in " + f"tensor_split op") raise AssertionError(msg) if isinstance(inputs[1], torch.Tensor) and not ( @@ -761,7 +760,7 @@ def ones(self, inputs, input_types): import torch if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)): - msg = "Data type %s could not be parsed in ones op" % (type(data)) + msg = f"Data type {type(data)} could not be parsed in ones op" raise AssertionError(msg) if inputs[1] is not None: @@ -790,7 +789,7 @@ def new_ones(self, inputs, input_types): import torch if not isinstance(size, (_expr.Expr, list, tuple, torch.Size, np.ndarray)): - msg = "Data type %s could not be parsed in ones op" % (type(size)) + msg = f"Data type {type(size)} could not be parsed in ones op" raise AssertionError(msg) if inputs[2] is not None: @@ -805,7 +804,7 @@ def zeros(self, inputs, input_types): import torch if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)): - msg = "Data type %s could not be parsed in zeros op" % (type(data)) + msg = f"Data type {type(data)} could not be parsed in zeros op" raise AssertionError(msg) if inputs[1] is not None: @@ -838,7 +837,7 @@ def new_zeros(self, inputs, input_types): import torch if not isinstance(data, (_expr.Expr, list, tuple, torch.Size)): - msg = "Data type %s could not be parsed in new_zeros op" % (type(data)) + msg = f"Data type {type(data)} could not be parsed in new_zeros op" raise AssertionError(msg) if inputs[2] is not None: @@ -855,7 +854,7 @@ def full(self, inputs, input_types): import torch if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)): - msg = "Data type %s could not be parsed in full op" % (type(data)) + msg = f"Data type {type(data)} could not be parsed in full op" raise AssertionError(msg) if inputs[2] is not None: # dtype given @@ -889,7 +888,7 @@ def new_full(self, inputs, input_types): import torch if not isinstance(data, (_expr.Expr, list, tuple, torch.Size)): - msg = "Data type %s could not be parsed in full op" % (type(data)) + msg = f"Data type {type(data)} could not be parsed in full op" raise AssertionError(msg) if inputs[3] is not None: # dtype given @@ -1217,7 +1216,7 @@ def convolution(self, inputs, input_types): for infer in inferred_shape: weight_shape.append(infer) else: - msg = "Data type %s could not be parsed in conv op" % (type(weight)) + msg = f"Data type {type(weight)} could not be parsed in conv op" raise AssertionError(msg) groups = int(inputs[8]) @@ -1412,7 +1411,7 @@ def get_dims(self, data): elif isinstance(data, (torch.Tensor, np.ndarray)): dims = data.shape else: - msg = "Data type %s could not be parsed" % type(data) + msg = f"Data type {type(data)} could not be parsed" raise AssertionError(msg) return dims @@ -1461,7 +1460,7 @@ def transpose(self, inputs, input_types): elif isinstance(data, (torch.Tensor, np.ndarray)): ndims = data.shape else: - msg = "Data type %s could not be parsed in transpose op" % (type(data)) + msg = f"Data type {type(data)} could not be parsed in transpose op" raise AssertionError(msg) if isinstance(data, tvm.runtime.NDArray): @@ -2596,8 +2595,8 @@ def nonzero_numpy(self, inputs, input_types): def scatter(self, 
inputs, input_types): assert len(inputs) == 4 or len(inputs) == 5, ( - "scatter takes 4 or 5 inputs: data, dim, index, src, reduce (optional), " - + "but {} given".format(len(inputs)) + f"scatter takes 4 or 5 inputs: data, dim, index, src, reduce (optional), " + f"but {len(inputs)} given" ) data = inputs[0] axis = int(inputs[1]) @@ -2739,7 +2738,7 @@ def new_empty(self, inputs, input_types): import torch if not isinstance(size, (_expr.Expr, list, tuple, torch.Size, np.ndarray)): - msg = "Data type %s could not be parsed in empty op" % (type(size)) + msg = f"Data type {type(size)} could not be parsed in empty op" raise AssertionError(msg) if inputs[2] is not None: @@ -2786,7 +2785,7 @@ def bincount(self, inputs, input_types): def scatter_add(self, inputs, input_types): assert ( len(inputs) == 4 - ), "scatter_add takes 4 inputs (data, dim, index, src), but {} given".format(len(inputs)) + ), f"scatter_add takes 4 inputs (data, dim, index, src), but {len(inputs)} given" data = inputs[0] axis = inputs[1] index = inputs[2] @@ -2817,8 +2816,8 @@ def scatter_add(self, inputs, input_types): def scatter_reduce(self, inputs, input_types): assert len(inputs) == 5 or len(inputs) == 6, ( - "scatter_reduce takes 5 or 6 inputs (data, dim, index, src, reduce, include_self), " - + "but {} given".format(len(inputs)) + f"scatter_reduce takes 5 or 6 inputs (data, dim, index, src, reduce, include_self), " + f"but {len(inputs)} given" ) data = inputs[0] dim = inputs[1] @@ -2849,9 +2848,7 @@ def scatter_reduce(self, inputs, input_types): ), "Index dim size should be less than data one" red_valids = ["sum", "prod", "mean", "amax", "amin"] - assert reduce in red_valids, "Only {} modes are supported, but {} is gotten".format( - red_valids, reduce - ) + assert reduce in red_valids, f"Only {red_valids} modes are supported, but {reduce} is gotten" if reduce == "sum": reduce = "add" elif reduce == "prod": @@ -4046,7 +4043,7 @@ def report_missing_conversion(self, op_names): missing.append(op_name) if missing: - msg = "The following operators are not implemented: {}".format(missing) + msg = f"The following operators are not implemented: {missing}" raise NotImplementedError(msg) def convert_block(self, block, outputs): @@ -4373,7 +4370,7 @@ def _convert_dtype_value(val): if val in convert_torch_dtype_map: return _convert_data_type(convert_torch_dtype_map[val]) else: - msg = "Torch data type value %d is not handled yet." % (val) + msg = f"Torch data type value {val} is not handled yet." 
raise NotImplementedError(msg) @@ -4412,7 +4409,7 @@ def _convert_data_type(input_type, default_dtype=None): elif input_type in ["str"]: return "str" else: - raise NotImplementedError("input_type {} is not handled yet".format(input_type)) + raise NotImplementedError(f"input_type {input_type} is not handled yet") return "float32" # Never reached @@ -4437,7 +4434,7 @@ def _create_typed_const(data, dtype): elif dtype == "uint8": typed_data = _expr.const(np.uint8(data), dtype=dtype) else: - raise NotImplementedError("input_type {} is not handled yet".format(dtype)) + raise NotImplementedError(f"input_type {dtype} is not handled yet") return typed_data @@ -4577,7 +4574,7 @@ def _get_constant(node): elif ty == "FunctionType": return None else: - raise NotImplementedError("Unsupported type: %s" % ty) + raise NotImplementedError(f"Unsupported type: {ty}") else: assert num_attributes == 0 return None @@ -4671,9 +4668,7 @@ def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_d raise RuntimeError(msg) if len(graph_inputs) != len(input_infos): - msg = "PyTorch has {} inputs and input_infos lists {}.".format( - len(graph_inputs), len(input_infos) - ) + msg = f"PyTorch has {len(graph_inputs)} inputs and input_infos lists {len(input_infos)}." raise RuntimeError(msg) def get_relay_ty(ishape, itype, pt_type): @@ -4721,12 +4716,12 @@ def get_relay_ty(ishape, itype, pt_type): new_input_infos = [] for num, inp in enumerate(input_infos): if not isinstance(inp, tuple): - msg = "Graph input {} is not a tuple".format(num) + msg = f"Graph input {num} is not a tuple" raise RuntimeError(msg) if len(inp) != 2 or not isinstance(inp[0], str): msg = ( - "Graph input {} is not valid," - " expected ('name', shape) or ('name', (shape, dtype))".format(inp) + f"Graph input {inp} is not valid," + f" expected ('name', shape) or ('name', (shape, dtype))" ) raise RuntimeError(msg) if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str): @@ -4883,7 +4878,7 @@ def export_c_graph(location, graph): if not os.path.exists(location): os.makedirs(location) time_stamp = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S") - fname = os.path.join(location, "tvm_exported_c_graph_{}.txt".format(time_stamp)) + fname = os.path.join(location, f"tvm_exported_c_graph_{time_stamp}.txt") with open(f"{fname}", "w") as f: f.write(str(graph)) diff --git a/python/tvm/relay/frontend/qnn_torch.py b/python/tvm/relay/frontend/qnn_torch.py index 131a471fd5c3..f5628a5919bc 100644 --- a/python/tvm/relay/frontend/qnn_torch.py +++ b/python/tvm/relay/frontend/qnn_torch.py @@ -381,7 +381,7 @@ def _add_output_quant_params_to_scalar_op(node, graph, input_scale, input_zero_p input_scale, input_zero_point, scalar ) else: - raise NotImplementedError("unsupported scalar op: %s" % operator) + raise NotImplementedError(f"unsupported scalar op: {operator}") # create new constant nodes and add them to graph out_scale_node = graph.create("prim::Constant") @@ -569,7 +569,7 @@ def get_full_attr_name(current): if "_scale" in out_name or "_zero_point" in out_name: full_attr = param_debug_name_map[get_full_attr_name(node)] - assert full_attr in params, "%s not found in param dict." % full_attr + assert full_attr in params, f"{full_attr} not found in param dict." 
param_np = params[full_attr].numpy() new_const_node = graph.create("prim::Constant") new_const_node.insertBefore(node) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index 1e2a2d4f826f..d17b6a01132f 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -344,7 +344,7 @@ def _while_loop(self): # This can happen when loop var node name is set accidentally # beginning with loop name. if lv not in self._lvar2expr[self._loop_name]: - var_name = "{}_loop_var_{}".format(self._loop_name, i) + var_name = f"{self._loop_name}_loop_var_{i}" var_type = _infer_type(lv, self._mod).checked_type loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name) self._lvar2expr[self._loop_name][loop_var] = lv @@ -464,7 +464,7 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): try: from tensorflow.python.framework import tensor_util except ImportError as e: - raise ImportError("Unable to import tensorflow which is required {}".format(e)) + raise ImportError(f"Unable to import tensorflow which is required {e}") missing_operators = self._parse_import_prerequisites(graph) control_flow_nodes = [] @@ -479,12 +479,12 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list] if freezed_ops: raise Exception( - "Graph is not frozen. Provide a frozen graph. " - "Found operators {}".format(freezed_ops) + f"Graph is not frozen. Provide a frozen graph. " + f"Found operators {freezed_ops}" ) raise NotImplementedError( - "The following operators are not implemented: {}".format(missing_operators) + f"The following operators are not implemented: {missing_operators}" ) for node in graph.node: @@ -535,8 +535,8 @@ def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None): self._output_shapes[node.name] = [self._input_shapes[node.name]] if shape and node.name in shape: warnings.warn( - "Ignore the passed shape. Shape in graphdef " - "will be used for operator %s." % node.name + f"Ignore the passed shape. Shape in graphdef " + f"will be used for operator {node.name}." ) for key, value in node.attr.items(): self._parse_param(key, value, node.name, self._in_shape) @@ -700,7 +700,7 @@ def _parse_param(self, key, value, name, shape): try: from tensorflow.python.framework import tensor_util except ImportError as e: - raise ImportError("Unable to import tensorflow which is required {}".format(e)) + raise ImportError(f"Unable to import tensorflow which is required {e}") if key == "value": np_array = tensor_util.MakeNdarray(value.tensor) @@ -733,7 +733,7 @@ def _parse_param(self, key, value, name, shape): else: if key not in ("dtype", "_output_shapes", "_class"): raise NotImplementedError( - "Other attributes for a Const(param) Node {} ? .".format(key) + f"Other attributes for a Const(param) Node {key} ? ." ) def _get_attr(self, buf): @@ -757,7 +757,7 @@ def _get_attr(self, buf): try: from tensorflow.python.framework import dtypes except ImportError as e: - raise ImportError("Unable to import tensorflow which is required {}".format(e)) + raise ImportError(f"Unable to import tensorflow which is required {e}") # Treat an empty oneof value as an empty list. 
if not x.WhichOneof("value"): @@ -906,7 +906,7 @@ def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_ op = self._licm_construct(plname, node.input[0]) self._loops[node_name_prefix].body.append(op) else: - raise Exception("Cannot identify control flow operator: " + "{}".format(node.op)) + raise Exception(f"Cannot identify control flow operator: {node.op}") return op @@ -936,7 +936,7 @@ def _partition_call_operator(self, inputs, attr): try: from tensorflow.python.framework import function_def_to_graph except ImportError as e: - raise ImportError("Unable to import tensorflow which is required {}".format(e)) + raise ImportError(f"Unable to import tensorflow which is required {e}") main_graph_proto = self._main_graph_proto outer_graph_def = main_graph_proto._graph @@ -964,7 +964,7 @@ def _partition_call_operator(self, inputs, attr): input_expr_dict[f_arg.name] = input subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph_proto._mod) - func_name = "func_{}".format(func.signature.name) + func_name = f"func_{func.signature.name}" try: global_func = main_graph_proto._mod[func_name] sub_func = global_func @@ -988,14 +988,14 @@ def _partition_call_operator(self, inputs, attr): elif param_name in sub_params.keys(): param_exprs.append(param_expr) else: - raise Exception("Input parameter {} not found".format(param_name)) + raise Exception(f"Input parameter {param_name} not found") sb = tvm.relay.scope_builder.ScopeBuilder() loop_ret = global_func(*param_exprs) sb.ret(loop_ret) ret = sb.get() else: - raise Exception("Function not found - {}".format(node_func_name)) + raise Exception(f"Function not found - {node_func_name}") return ret def _convert_operator( @@ -1039,7 +1039,7 @@ def _convert_operator( elif op_name in ["PartitionedCall", "StatefulPartitionedCall"]: sym = self._partition_call_operator(inputs, attrs) else: - raise NotImplementedError("Operator {} not implemented.".format(op_name)) + raise NotImplementedError(f"Operator {op_name} not implemented.") sym = set_span(sym, node_name) @@ -1076,7 +1076,7 @@ def _licm_construct(self, loop_name, node_name): self._lname_map[loop_name] = {} if node_name not in self._lname_map[loop_name]: - var_name = "{}_loop_var".format(node_name) + var_name = f"{node_name}_loop_var" var_type = _infer_type(actual_expr, self._mod).checked_type loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name) try: @@ -1116,7 +1116,7 @@ def _backtrack_construct(self, node_name): try: from tensorflow.python.framework import tensor_util except ImportError as e: - raise ImportError("Unable to import tensorflow which is required {}".format(e)) + raise ImportError(f"Unable to import tensorflow which is required {e}") input_op_name = node_name.split(":")[0].split("^")[-1] if input_op_name not in self._nodes: diff --git a/python/tvm/relay/frontend/tensorflow2.py b/python/tvm/relay/frontend/tensorflow2.py index ab3bb35c20ff..c4bc90367874 100644 --- a/python/tvm/relay/frontend/tensorflow2.py +++ b/python/tvm/relay/frontend/tensorflow2.py @@ -488,7 +488,7 @@ def _convert_operator(self, graph, op_name, node_name, inputs, attrs): else: sym = _convert_map_tf2[op_name](inputs, attrs, self._params, self._module.mod) else: - raise NotImplementedError("Operator {} not implemented.".format(op_name)) + raise NotImplementedError(f"Operator {op_name} not implemented.") sym = set_span(sym, node_name) return sym @@ -727,13 +727,11 @@ def _convert_function( None, ) if func is None: - raise Exception("Function not found - 
{}".format(node_func_name)) + raise Exception(f"Function not found - {node_func_name}") devices = set(node.device for node in func.node_def) if len(devices) > 1: raise Exception( - "node_def in function {} contains > 1 types of devices {}".format( - node_func_name, devices - ) + f"node_def in function {node_func_name} contains > 1 types of devices {devices}" ) subgraph = gdef_lib[node_func_name] @@ -748,7 +746,7 @@ def _convert_function( input_expr_dict[f_arg.name] = input_ input_types[f_arg.name] = _infer_type_with_prelude(input_, prelude) - func_name = "func_{}".format(func.signature.name) + func_name = f"func_{func.signature.name}" try: global_func = module.mod[func_name] sub_func = global_func @@ -777,7 +775,7 @@ def _convert_function( elif param_name in sub_params.keys(): param_exprs.append(param_expr) else: - raise Exception("Input parameter {} not found".format(param_name)) + raise Exception(f"Input parameter {param_name} not found") sb = tvm.relay.scope_builder.ScopeBuilder() loop_ret = global_func(*param_exprs) diff --git a/python/tvm/relay/frontend/tensorflow2_ops.py b/python/tvm/relay/frontend/tensorflow2_ops.py index 17cd112878a5..41af74add587 100644 --- a/python/tvm/relay/frontend/tensorflow2_ops.py +++ b/python/tvm/relay/frontend/tensorflow2_ops.py @@ -64,7 +64,7 @@ def _impl(inputs, attr, params, prelude): input_rank = len(input_t_shape) if input_ta_shape is None: - tensor_name = "tensor{}".format(input_rank) + tensor_name = f"tensor{input_rank}" tensor_func = prelude.get_tensor_ctor(tensor_name, dtype_str) v = tensor_func(inputs[2]) write_func = prelude.get_global_var("tensor_array_write", dtype_str) diff --git a/python/tvm/relay/frontend/tensorflow_ops.py b/python/tvm/relay/frontend/tensorflow_ops.py index 27374fad1a94..b1a92222887e 100644 --- a/python/tvm/relay/frontend/tensorflow_ops.py +++ b/python/tvm/relay/frontend/tensorflow_ops.py @@ -80,7 +80,7 @@ def _impl(attr): if len(kernel) == 3: return prefix + "3d" + surfix raise tvm.error.OpAttributeInvalid( - "Only 2D or 3D kernels are supported for operator {}".format(prefix + "2d or 3d") + f"Only 2D or 3D kernels are supported for operator {prefix}2d or 3d" ) return _impl @@ -168,7 +168,7 @@ def _impl(inputs, attr, params, mod): axis_input_value = [_get_num_param(params, inputs[1])] except (IndexError, KeyError): raise TypeError( - "Unsupported argument for `{}` : `axis` should be a constant".format(func_name) + f"Unsupported argument for `{func_name}` : `axis` should be a constant" ) out = func(inputs[0], axis=axis_input_value, keepdims=False) dtype = attr["output_type"].name @@ -181,7 +181,7 @@ def _impl(inputs, attr, params, mod): def _elemwise(name): def _impl(inputs, attr, params, mod): - assert len(inputs) == 2, "{} take 2 inputs, {} given".format(name, len(inputs)) + assert len(inputs) == 2, f"{name} take 2 inputs, {len(inputs)} given" return get_relay_op(name)(*inputs) return _impl @@ -201,8 +201,9 @@ def _impl(inputs, attr, params, mod): attr["kernel_shape"] = (attr["ksize"][2], attr["ksize"][3], attr["ksize"][4]) attr["strides"] = (attr["strides"][2], attr["strides"][3], attr["strides"][4]) else: - msg = 'Value {} of attribute "data_format" of operator Pooling ' "is not valid." 
- raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"])) + msg = (f'Value {attr["data_format"]} of attribute "data_format" of operator Pooling ' + f"is not valid.") + raise tvm.error.OpAttributeInvalid(msg) if attr["data_format"] == "NDHWC": input_shape = [_infer_shape(inputs[0], mod)[i] for i in (0, 4, 1, 2, 3)] inputs[0] = _op.transpose(inputs[0], axes=(0, 4, 1, 2, 3)) @@ -230,8 +231,9 @@ def _impl(inputs, attr, params, mod): attr["padding"] = [pad_d[0], pad_v[0], pad_h[0], pad_d[1], pad_v[1], pad_h[1]] else: - msg = 'Value {} in attribute "padding" of operator Pooling is ' "not valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"])) + msg = (f'Value {attr["padding"]} in attribute "padding" of operator Pooling is ' + f"not valid.") + raise tvm.error.OpAttributeInvalid(msg) if name == "avg_pool": attr["count_include_pad"] = False @@ -263,8 +265,9 @@ def _impl(inputs, attr, params, mod): attr["kernel_shape"] = (attr["ksize"][2], attr["ksize"][3]) attr["strides"] = (attr["strides"][2], attr["strides"][3]) else: - msg = 'Value {} of attribute "data_format" of operator Pooling ' "is not valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"])) + msg = (f'Value {attr["data_format"]} of attribute "data_format" of operator Pooling ' + f"is not valid.") + raise tvm.error.OpAttributeInvalid(msg) if attr["_target_layout"] == "NCHW" and attr["data_format"] == "NHWC": tmp_shape = _infer_shape(inputs[0], mod) @@ -300,8 +303,9 @@ def _impl(inputs, attr, params, mod): else: attr["padding"] = [paddings[4], paddings[6], paddings[5], paddings[7]] else: - msg = 'Value {} in attribute "padding" of operator Pooling is ' "not valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"])) + msg = (f'Value {attr["padding"]} in attribute "padding" of operator Pooling is ' + f"not valid.") + raise tvm.error.OpAttributeInvalid(msg) if name == "avg_pool": attr["count_include_pad"] = False @@ -410,8 +414,9 @@ def _impl(inputs, attr, params, mod): attr["dilations"] = (attr["dilations"][2], attr["dilations"][3]) attr["strides"] = (attr["strides"][2], attr["strides"][3]) else: - msg = 'Value {} in attribute "data_format" of operator Conv is ' "not valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"])) + msg = (f'Value {attr["data_format"]} in attribute "data_format" of operator Conv is ' + f"not valid.") + raise tvm.error.OpAttributeInvalid(msg) if opname == "depthwise": attr["groups"] = in_channels @@ -457,8 +462,9 @@ def _impl(inputs, attr, params, mod): else: attr["padding"] = [paddings[4], paddings[6], paddings[5], paddings[7]] else: - msg = 'Value {} in attribute "padding" of operator Conv is not ' "valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"])) + msg = (f'Value {attr["padding"]} in attribute "padding" of operator Conv is not ' + f"valid.") + raise tvm.error.OpAttributeInvalid(msg) if "kernel_layout" not in attr: if opname == "conv": @@ -515,8 +521,9 @@ def _impl(inputs, attr, params, mod): attr["dilations"] = (attr["dilations"][1], attr["dilations"][2]) attr["strides"] = (attr["strides"][1], attr["strides"][2]) else: - msg = 'Value {} in attribute "data_format" of operator Dilation2D is ' "not valid." 
- raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"])) + msg = (f'Value {attr["data_format"]} in attribute "data_format" of operator Dilation2D is ' + f"not valid.") + raise tvm.error.OpAttributeInvalid(msg) attr["padding"] = attr["padding"].decode("utf-8") if attr["padding"] == "VALID": @@ -555,8 +562,9 @@ def _impl(inputs, attr, params, mod): attr["padding"] = [0, 0] else: - msg = 'Value {} in attribute "padding" of operator Dilation2d is not ' "valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"])) + msg = (f'Value {attr["padding"]} in attribute "padding" of operator Dilation2d is not ' + f"valid.") + raise tvm.error.OpAttributeInvalid(msg) attr["kernel_layout"] = "HWI" if attr["data_format"] == "NHWC" else "IHW" out = AttrCvt( @@ -631,8 +639,9 @@ def _impl(inputs, attr, params, mod): ) attr["strides"] = (attr["strides"][2], attr["strides"][3], attr["strides"][4]) else: - msg = 'Value {} in attribute "data_format" of operator Conv is ' "not valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"])) + msg = (f'Value {attr["data_format"]} in attribute "data_format" of operator Conv is ' + f"not valid.") + raise tvm.error.OpAttributeInvalid(msg) # Fix padding attr["padding"] = attr["padding"].decode("utf-8") @@ -689,8 +698,9 @@ def _impl(inputs, attr, params, mod): paddings[9], ] else: - msg = 'Value {} in attribute "padding" of operator Conv is not ' "valid." - raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"])) + msg = (f'Value {attr["padding"]} in attribute "padding" of operator Conv is not ' + f"valid.") + raise tvm.error.OpAttributeInvalid(msg) if "kernel_layout" not in attr: if opname == "conv": @@ -1031,7 +1041,7 @@ def _impl(inputs, attr, params, mod): method = attr["method"].decode() method = "nearest_neighbor" if method == "nearest" else method if method not in ["bilinear", "nearest_neighbor"]: - raise tvm.error.OpAttributeUnImplemented("Method {} is not supported".format(method)) + raise tvm.error.OpAttributeUnImplemented(f"Method {method} is not supported") layout = attr["layout"] if "layout" in attr else "NHWC" extrapolation_value = attr["extrapolation_value"] @@ -1574,7 +1584,7 @@ def _impl(inputs, attr, params, prelude): if input_shape is None: values_rank = len(values_shape) - unstack_name = "tensor_array_unstack_tensor{}".format(values_rank) + unstack_name = f"tensor_array_unstack_tensor{values_rank}" unstack_function = prelude.get_global_var(unstack_name, dtype_str) values = unstack_function(inputs[2]) tensor_array_scatter_func = prelude.get_global_var("tensor_array_scatter", dtype_str) @@ -1669,15 +1679,13 @@ def _impl(inputs, attr, params, prelude): input_rank = len(input_t_shape) if input_ta_shape is None: - tensor_name = "tensor{}".format(input_rank) + tensor_name = f"tensor{input_rank}" tensor_func = prelude.get_tensor_ctor(tensor_name, dtype_str) v = tensor_func(inputs[2]) write_func = prelude.get_global_var("tensor_array_write", dtype_str) else: input_ta_rank = len(input_ta_shape) - assert input_ta_rank == input_rank, "Shape rank mismatch: {} vs {}".format( - input_ta_rank, input_rank - ) + assert input_ta_rank == input_rank, f"Shape rank mismatch: {input_ta_rank} vs {input_rank}" static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape) static_tensor_array_ops.register() tensor_func = static_tensor_array_ops.get_ctor("tensor_constructor") @@ -1735,15 +1743,13 @@ def _impl(inputs, attr, params, prelude): input_rank = len(value_shape) if input_ta_shape is None: - tensor_name = 
"tensor{}".format(input_rank) + tensor_name = f"tensor{input_rank}" tensor_ctor = prelude.get_tensor_ctor(tensor_name, dtype_str) v = tensor_ctor(inputs[1]) split_func = prelude.get_global_var("tensor_array_split", dtype_str) else: input_ta_rank = len(input_ta_shape) - assert input_ta_rank == input_rank, "Shape rank mismatch: {} vs {}".format( - input_ta_rank, input_rank - ) + assert input_ta_rank == input_rank, f"Shape rank mismatch: {input_ta_rank} vs {input_rank}" static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape) static_tensor_array_ops.register() @@ -1925,10 +1931,7 @@ def _impl(inputs, attr, params, mod): elif s0[s0_size - i] == 1: out.appendleft(s1[s1_size - i]) else: - assert s1[s1_size - i] == 1, "Incompatible broadcast type %s and %s" % ( - s0[s0_size - i], - s1[s1_size - i], - ) + assert s1[s1_size - i] == 1, f"Incompatible broadcast type {0[s0_size - i] and {s1[s1_size - i]}" out.appendleft(s0[s0_size - i]) if s0_size < s1_size: for i in range(s0_size + 1, s1_size + 1): diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index 18742b51d04f..7bc5d70bc9b7 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -212,17 +212,16 @@ def check_unsupported_ops(self): raise_msg = "" if unsupported_ops_set: - msg = "The following operators are not supported in frontend " "TFLite: {}\n" ops = str(list(unsupported_ops_set)).strip("[,]") - raise_msg += msg.format(ops) + raise_msg += f"The following operators are not supported in frontend TFLite: {ops}\n" if dynamic_range_ops_set: - msg = ( - "The following operators are likely to have dynamic range quantization: {}. " - "If you are running an optimized graph, please turn off dynamic range quantization " - "or use full integer quantization" + ops = str(list(dynamic_range_ops_set)).strip("[,]") + raise_msg += ( + f"The following operators are likely to have dynamic range quantization: {ops}. " + f"If you are running an optimized graph, please turn off dynamic range quantization " + f"or use full integer quantization" ) - raise_msg += msg.format(str(list(dynamic_range_ops_set)).strip("[,]")) if len(raise_msg) > 0: raise tvm.error.OpNotImplemented(raise_msg) @@ -406,9 +405,8 @@ def get_tensors(self, tensors_idx_list): else: raise NotImplementedError( - "Quantized type {} (scale) and {} (zero point) not supported".format( - type(tflite_scale), type(tflite_zero_point) - ) + f"Quantized type {type(tflite_scale)} (scale) and " + f"{type(tflite_zero_point)} (zero point) not supported" ) elif tflite_scale == 0 and tflite_zero_point == 0: # Handle corner case for ops like quantized reshape whose second operand (shape) @@ -416,7 +414,7 @@ def get_tensors(self, tensors_idx_list): is_qnn_params_valid = False else: raise NotImplementedError( - "Quantized type {} not supported".format(type(tflite_scale)) + f"Quantized type {type(tflite_scale)} not supported" ) # Check that the scale and zero points are valid. 
@@ -448,7 +446,7 @@ def get_tensor_type_as_numpy(self, tensor_wrapper): raise ImportError("The tflite package must be installed") except KeyError: raise NotImplementedError( - "Tensor type '{}' currently not supported".format(tensor_wrapper.tensor.Type()) + f"Tensor type '{tensor_wrapper.tensor.Type()}' currently not supported" ) # pylint: disable=no-else-return @@ -493,7 +491,7 @@ def get_tensor_type_str(self, tensor_type): if tensor_type == TensorType.BOOL: return "bool" raise NotImplementedError( - "Tensor type {} is currently not supported".format(str(tensor_type)) + f"Tensor type {str(tensor_type)} is currently not supported" ) def flatten_to_nd(self, x, x_shape, nd=3): @@ -581,7 +579,7 @@ def convert_qnn_fused_activation_function( fused_activation_fn_str = self.activation_fn_type[fused_activation_fn] raise tvm.error.OpNotImplemented( - "Quantized activation {} is not supported yet.".format(fused_activation_fn_str) + f"Quantized activation {fused_activation_fn_str} is not supported yet." ) def convert_conv2d(self, op): @@ -2114,7 +2112,7 @@ def convert_fused_activation_function(self, in_expr, fused_activation_fn): return _op.tanh(in_expr) fused_activation_fn_str = self.activation_fn_type[fused_activation_fn] raise tvm.error.OpNotImplemented( - "Fused activation {} is not supported yet.".format(fused_activation_fn_str) + f"Fused activation {fused_activation_fn_str} is not supported yet." ) def convert_conv(self, op, conv_type): @@ -2156,7 +2154,7 @@ def convert_conv(self, op, conv_type): depth_multiplier = conv_options.DepthMultiplier() else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend TFLite.".format(conv_type) + f"Operator {conv_type} is not supported for frontend TFLite." ) stride_h = conv_options.StrideH() @@ -2256,7 +2254,7 @@ def convert_conv(self, op, conv_type): else: raise tvm.error.OpAttributeUnImplemented( - "Padding format {} is not supported for operator Conv.".format(padding) + f"Padding format {padding} is not supported for operator Conv." ) if input_tensor.qnn_params: @@ -2588,7 +2586,7 @@ def convert_pool2d(self, op, pool_type): params["padding"] = [pad_top, pad_left, pad_bottom, pad_right] else: raise tvm.error.OpAttributeUnImplemented( - "Padding format {} for operator Pool2D is not supported.".format(padding) + f"Padding format {padding} for operator Pool2D is not supported." ) if pool_type == "average": @@ -2621,7 +2619,7 @@ def convert_pool2d(self, op, pool_type): out = _op.sqrt(avg_pool_exp) else: raise tvm.error.OpNotImplemented( - "Operator {} is not supported for frontend TFLite.".format(pool_type + " pool") + f"Operator {pool_type} pool is not supported for frontend TFLite." ) # Handle fused activations @@ -3279,7 +3277,7 @@ def convert_transpose_conv(self, op): assert padding in ( Padding.VALID, Padding.SAME, - ), "Padding format {} is not supported for operator TRANSPOSE_CONV".format(padding) + ), f"Padding format {padding} is not supported for operator TRANSPOSE_CONV" # Data in_expr = self.get_expr(input_tensor.tensor_idx) @@ -3465,9 +3463,8 @@ def convert_detection_postprocess(self, op): if "use_regular_nms" in custom_options: if custom_options["use_regular_nms"]: raise tvm.error.OpAttributeUnImplemented( - "use_regular_nms=True is not yet supported for operator {}.".format( - "TFLite_Detection_PostProcess" - ) + "use_regular_nms=True is not yet supported for operator " + "TFLite_Detection_PostProcess." 
) inputs = self.get_input_tensors(op) @@ -3930,7 +3927,7 @@ def _get_vector_flag(v_type): elif VectorType(v_type) == VectorType.Uint8: return N.Uint8Flags else: - raise tvm.error.OpNotImplemented("The provided type {} is not supported".format(v_type)) + raise tvm.error.OpNotImplemented(f"The provided type {v_type} is not supported") def _get_flattened_index(indices, shape): index = 0 From cd0de1493273e4216ceb8ced8694688d37153b70 Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Thu, 11 May 2023 10:54:34 -0700 Subject: [PATCH 2/2] Format modified files --- python/tvm/relay/frontend/caffe.py | 5 +- python/tvm/relay/frontend/caffe2.py | 4 +- python/tvm/relay/frontend/coreml.py | 2 +- python/tvm/relay/frontend/keras.py | 52 ++-- python/tvm/relay/frontend/mxnet.py | 12 +- python/tvm/relay/frontend/oneflow.py | 28 +- python/tvm/relay/frontend/onnx.py | 314 ++++++-------------- python/tvm/relay/frontend/paddlepaddle.py | 22 +- python/tvm/relay/frontend/pytorch.py | 108 ++----- python/tvm/relay/frontend/qnn_torch.py | 21 +- python/tvm/relay/frontend/tensorflow.py | 4 +- python/tvm/relay/frontend/tensorflow2.py | 19 +- python/tvm/relay/frontend/tensorflow_ops.py | 107 +++---- python/tvm/relay/frontend/tflite.py | 36 +-- 14 files changed, 221 insertions(+), 513 deletions(-) diff --git a/python/tvm/relay/frontend/caffe.py b/python/tvm/relay/frontend/caffe.py index f3af53efffae..708cc3f4f11f 100644 --- a/python/tvm/relay/frontend/caffe.py +++ b/python/tvm/relay/frontend/caffe.py @@ -417,7 +417,6 @@ def convert_pooling(self, op): else: raise tvm.error.OpNotImplemented( f"Operator {pool_type_dict[pool_type]} pool is not supported for frontend Caffe." - ) ) return out @@ -547,9 +546,7 @@ def convert_deconv(self, op): # weight shape is in relay's IOHW format rn, we need it to be OIHW weight_value = np.transpose(weight_value, [1, 0, 2, 3]) else: - raise tvm.error.OpAttributeRequired( - f"No weight value of layer {op.name} in caffemodel" - ) + raise tvm.error.OpAttributeRequired(f"No weight value of layer {op.name} in caffemodel") weight_expr = self.exp_tab.new_const(weight_value, dtype="float32") in_expr = self.exp_tab.get_expr(inputs[0]) diff --git a/python/tvm/relay/frontend/caffe2.py b/python/tvm/relay/frontend/caffe2.py index a7fcd0cc1ea7..e59aad255a80 100644 --- a/python/tvm/relay/frontend/caffe2.py +++ b/python/tvm/relay/frontend/caffe2.py @@ -287,9 +287,7 @@ def _get_axis_from_order_str(order): return AttrCvt( op_name="concatenate", - transforms={ - "order": ("axis", (1), _get_axis_from_order_str), - }, + transforms={"order": ("axis", (1), _get_axis_from_order_str)}, excludes=["add_axis"], )((inputs,), args, params) diff --git a/python/tvm/relay/frontend/coreml.py b/python/tvm/relay/frontend/coreml.py index 09cfcb7430ed..9c525182a08c 100644 --- a/python/tvm/relay/frontend/coreml.py +++ b/python/tvm/relay/frontend/coreml.py @@ -392,7 +392,7 @@ def _UnaryFunctionLayerParams(op, inexpr, etab): return _op.maximum(inexpr, alpha) else: msg = f"Unary Op type value {op_type} is not supported in frontend CoreML." 
- raise tvm.error.OpAttributeUnImplemented(msg)) + raise tvm.error.OpAttributeUnImplemented(msg) def _ReduceLayerParams(op, inexpr, etab): diff --git a/python/tvm/relay/frontend/keras.py b/python/tvm/relay/frontend/keras.py index 283da87d88b3..023ce52b2f6a 100644 --- a/python/tvm/relay/frontend/keras.py +++ b/python/tvm/relay/frontend/keras.py @@ -121,9 +121,7 @@ def _convert_activation( x = (_expr.const(0.2, dtype="float32") * inexpr) + _expr.const(0.5, dtype="float32") return _op.clip(x, a_min=0.0, a_max=1.0) - raise tvm.error.OpNotImplemented( - f"Operator {act_type} is not supported in frontend Keras." - ) + raise tvm.error.OpNotImplemented(f"Operator {act_type} is not supported in frontend Keras.") def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_shape=None): @@ -135,9 +133,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_s axis = keras_layer.axis dims = len(input_shape) if isinstance(axis, list): - raise tvm.error.OpAttributeUnImplemented( - f"Softmax with axes {axis} is not supported." - ) + raise tvm.error.OpAttributeUnImplemented(f"Softmax with axes {axis} is not supported.") if data_layout == "NCHW": if axis == -1: axis = 1 @@ -182,9 +178,7 @@ def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_s inexpr, _op.greater(inexpr, _expr.const(theta, dtype="float32")).astype("float32") ) - raise tvm.error.OpNotImplemented( - f"Operator {act_type} is not supported in frontend Keras." - ) + raise tvm.error.OpNotImplemented(f"Operator {act_type} is not supported in frontend Keras.") def _convert_merge( @@ -344,8 +338,10 @@ def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=N pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w) params["padding"] = [pad_w[0], pad_w[1]] else: - msg = (f"Padding with {keras_layer.padding} is not supported for operator Convolution3D " - f"in frontend Keras.") + msg = ( + f"Padding with {keras_layer.padding} is not supported for operator Convolution3D " + f"in frontend Keras." + ) raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: @@ -435,8 +431,10 @@ def _convert_convolution(inexpr, keras_layer, etab, data_layout, input_shape=Non pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w) params["padding"] = (pad_t, pad_l, pad_b, pad_r) else: - msg = (f"Padding with {keras_layer.padding} is not supported for operator Convolution " - f"in frontend Keras.") + msg = ( + f"Padding with {keras_layer.padding} is not supported for operator Convolution " + f"in frontend Keras." + ) raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: out = _op.nn.conv2d_transpose(data=inexpr, **params) @@ -522,8 +520,10 @@ def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=N pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w) params["padding"] = [pad_d[0], pad_h[0], pad_w[0], pad_d[1], pad_h[1], pad_w[1]] else: - msg = (f"Padding with {keras_layer.padding} is not supported for operator Convolution3D " - f"in frontend Keras.") + msg = ( + f"Padding with {keras_layer.padding} is not supported for operator Convolution3D " + f"in frontend Keras." + ) raise tvm.error.OpAttributeUnImplemented(msg) if is_deconv: out = _op.nn.conv3d_transpose(data=inexpr, **params) @@ -683,9 +683,7 @@ def _convert_pooling( if pool_type == "AveragePooling2D": params["count_include_pad"] = False return _op.nn.avg_pool2d(inexpr, **params) - raise tvm.error.OpNotImplemented( - f"Operator {keras_layer} is not supported for frontend Keras." 
- ) + raise tvm.error.OpNotImplemented(f"Operator {keras_layer} is not supported for frontend Keras.") def _convert_pooling3d( @@ -877,8 +875,10 @@ def _convert_padding( top, bottom = padding[0] left, right = padding[1] else: - msg = (f'Value {str(padding)} in attribute "padding" of operator Padding is ' - f'not valid.') + msg = ( + f'Value {str(padding)} in attribute "padding" of operator Padding is ' + f"not valid." + ) raise tvm.error.OpAttributeInvalid(msg) else: msg = f'Value {str(padding)} in attribute "padding" of operator Padding is not valid.' @@ -1347,9 +1347,7 @@ def _check_unsupported_layers(model): missing_ops.add(op_name) if missing_ops: - raise NotImplementedError( - f"The following operators are not implemented: {missing_ops}" - ) + raise NotImplementedError(f"The following operators are not implemented: {missing_ops}") def keras_op_to_relay(inexpr, keras_layer, outname, etab, data_layout): @@ -1374,9 +1372,7 @@ def keras_op_to_relay(inexpr, keras_layer, outname, etab, data_layout): """ op_name = type(keras_layer).__name__ if op_name not in _convert_map: - raise tvm.error.OpNotImplemented( - f"Operator {op_name} is not supported for frontend Keras." - ) + raise tvm.error.OpNotImplemented(f"Operator {op_name} is not supported for frontend Keras.") outs = _convert_map[op_name](inexpr, keras_layer, etab, data_layout) outs = _as_list(outs) for t_idx, out in enumerate(outs): @@ -1427,9 +1423,7 @@ def _convert_layer(keras_layer, etab, scope=""): else None ) if inbound_nodes is None: - raise TypeError( - f"Unknown layer type or unsupported Keras version : {keras_layer}" - ) + raise TypeError(f"Unknown layer type or unsupported Keras version : {keras_layer}") outs = [] for node_idx, node in enumerate(inbound_nodes): # If some nodes in imported model are not relevant to the current model, diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py index f7cf39309b8b..7ef8c6134e8c 100644 --- a/python/tvm/relay/frontend/mxnet.py +++ b/python/tvm/relay/frontend/mxnet.py @@ -699,9 +699,7 @@ def _mx_leaky_relu(inputs, attrs): half = _expr.const(0.5, dtype="float32") half_x = _op.multiply(inputs[0], half) return _op.multiply(half_x, erf_plus_one) - raise tvm.error.OpNotImplemented( - f"Operator {act_type} is not supported for frontend MXNet." - ) + raise tvm.error.OpNotImplemented(f"Operator {act_type} is not supported for frontend MXNet.") def _mx_make_power(power): @@ -822,9 +820,7 @@ def _mx_dot(inputs, attrs): if rank_b == 1: if not out_shape: - out_shape = [ - 1, - ] + out_shape = [1] b = _op.expand_dims(b, axis=1) else: # Transpose matrix b if needed @@ -1718,9 +1714,7 @@ def _has_fused_activation(_attrs, _supported_activations): subgraph_activation_attrs = _get_subgraph_op(subgraphs, "Activation")["attrs"] act_type = subgraph_activation_attrs["act_type"] if act_type not in _supported_activations: - raise ValueError( - f"Fused activation {act_type} is not supported at this time" - ) + raise ValueError(f"Fused activation {act_type} is not supported at this time") has_fused_activation = True return has_fused_activation diff --git a/python/tvm/relay/frontend/oneflow.py b/python/tvm/relay/frontend/oneflow.py index 5e63eebecbeb..4f278d8249e8 100644 --- a/python/tvm/relay/frontend/oneflow.py +++ b/python/tvm/relay/frontend/oneflow.py @@ -33,15 +33,7 @@ from .. import function as _function from .. import op as _op from .. 
import ty as _ty -from .common import ( - AttrCvt, - Renamer, - fold_constant, - get_relay_op, - infer_shape, - infer_type, - new_var, -) +from .common import AttrCvt, Renamer, fold_constant, get_relay_op, infer_shape, infer_type, new_var __all__ = ["from_oneflow"] @@ -242,7 +234,7 @@ def _impl_v1(cls, inputs, attrs, params): return _op.nn.global_avg_pool3d(inputs[0]) raise NotImplementedError( "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." - % (rank - 2), + % (rank - 2) ) @@ -260,7 +252,7 @@ def _impl_v1(cls, inputs, attrs, params): return _op.nn.global_max_pool3d(inputs[0]) raise NotImplementedError( "Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." - % (rank - 2), + % (rank - 2) ) @@ -314,9 +306,7 @@ def _impl_v1(cls, inputs, attrs, params): out = AttrCvt( op_name=cls.name, - transforms={ - "group": ("groups", 1), - }, + transforms={"group": ("groups", 1)}, ignores=["data_format", "filters", "padding_after", "padding_before"], custom_check=dimension_constraint(), )([data, kernel], attrs, params) @@ -364,9 +354,7 @@ def _impl_v1(cls, inputs, attrs, params): out = AttrCvt( op_name=cls.name, - transforms={ - "group": ("groups", 1), - }, + transforms={"group": ("groups", 1)}, disables=["filters", "data_format", "padding_before"], custom_check=dimension_constraint(), )([data, kernel], attrs, params) @@ -1539,11 +1527,7 @@ def deal_with_input_convert( or "_input." in node_input or "FreeEagerTensor" in node_input ): - _nodes[node_input] = new_var( - node_input, - shape=node_input_shape, - dtype=node_input_dtype, - ) + _nodes[node_input] = new_var(node_input, shape=node_input_shape, dtype=node_input_dtype) else: names = _input_path_2_name[node_path] node_replace = None diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 031158b666bf..10f85ad6ce6b 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -68,7 +68,7 @@ # Change this flag to False to directly convert to `nn.batch_matmul`. # Note that `nn.batch_matmul` with format other than NT is in experimental, it may have some # performance issues. 
- "use_nt_batch_matmul": True, + "use_nt_batch_matmul": True } @@ -312,22 +312,10 @@ def matmul_out_dtype(inputs, out_dtype): b = inputs[1] # broadcast a and b a_broadcasted_shape = fold_constant( - _op.concatenate( - [ - out_batch, - _op.strided_slice(a_shape, [a_rank - 2], [a_rank]), - ], - 0, - ) + _op.concatenate([out_batch, _op.strided_slice(a_shape, [a_rank - 2], [a_rank])], 0) ) b_broadcasted_shape = fold_constant( - _op.concatenate( - [ - out_batch, - _op.strided_slice(b_shape, [b_rank - 2], [b_rank]), - ], - 0, - ) + _op.concatenate([out_batch, _op.strided_slice(b_shape, [b_rank - 2], [b_rank])], 0) ) if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape): a = _op.transform.broadcast_to(a, a_broadcasted_shape) @@ -452,22 +440,10 @@ def qmatmul( else: # broadcast a and b a_broadcasted_shape = fold_constant( - _op.concatenate( - [ - out_batch, - _op.strided_slice(a_shape, [a_rank - 2], [a_rank]), - ], - 0, - ) + _op.concatenate([out_batch, _op.strided_slice(a_shape, [a_rank - 2], [a_rank])], 0) ) b_broadcasted_shape = fold_constant( - _op.concatenate( - [ - out_batch, - _op.strided_slice(b_shape, [b_rank - 2], [b_rank]), - ], - 0, - ) + _op.concatenate([out_batch, _op.strided_slice(b_shape, [b_rank - 2], [b_rank])], 0) ) if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape): a = _op.transform.broadcast_to(a, a_broadcasted_shape) @@ -480,13 +456,7 @@ def qmatmul( bt = _op.transpose(b, [0, 2, 1]) # Perform a NT batch matmul. output = _qnn.op.batch_matmul( - a, - bt, - a_zp_scalar, - b_zp_scalar, - a_scale_scalar, - b_scale_scalar, - matmul_result_dtype, + a, bt, a_zp_scalar, b_zp_scalar, a_scale_scalar, b_scale_scalar, matmul_result_dtype ) # Reshape output to original dimensions. final_shape = _op.concatenate( @@ -546,10 +516,7 @@ def layer_norm(x, eps, gamma, beta): """ eps_dtype = infer_type(x).checked_type.dtype u, s = _op.mean_variance(x, axis=-1, keepdims=True) - output = _op.divide( - _op.subtract(x, u), - _op.sqrt(_op.add(s, _op.const(eps, dtype=eps_dtype))), - ) + output = _op.divide(_op.subtract(x, u), _op.sqrt(_op.add(s, _op.const(eps, dtype=eps_dtype)))) output = _op.multiply(output, gamma) if beta is not None: output = _op.add(output, beta) @@ -610,9 +577,7 @@ def get_converter(cls, opset): version = versions[max([i for i, v in enumerate(versions) if v == opset]) - 1] if hasattr(cls, f"_impl_v{version}"): return getattr(cls, f"_impl_v{version}") - raise NotImplementedError( - f"opset version {version} of {cls.__name__} not implemented" - ) + raise NotImplementedError(f"opset version {version} of {cls.__name__} not implemented") class Unary(OnnxOpConverter): @@ -703,8 +668,10 @@ def _run_calculation(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator {cls.name} ' - f"is invalid.") + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator {cls.name} ' + f"is invalid." + ) raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") @@ -834,8 +801,10 @@ def _impl_v1(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' - f"is invalid.") + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f"is invalid." 
+ ) raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") @@ -921,8 +890,10 @@ def _impl_v1(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' - f'is invalid.') + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f"is invalid." + ) raise tvm.error.OpAttributeInvalid(msg) if "auto_pad" in attr: attr.pop("auto_pad") @@ -1002,8 +973,10 @@ def _impl_v11(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' - f'is invalid.') + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f"is invalid." + ) raise tvm.error.OpAttributeInvalid(msg) if "auto_pad" in attr: attr.pop("auto_pad") @@ -1039,7 +1012,7 @@ def _impl_v1(cls, inputs, attr, params): return _op.nn.global_avg_pool3d(inputs[0]) raise NotImplementedError( "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." - % (rank - 2), + % (rank - 2) ) @@ -1072,7 +1045,7 @@ def _impl_v1(cls, inputs, attr, params): else: raise NotImplementedError( "Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." - % (rank - 2), + % (rank - 2) ) return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype) @@ -1091,7 +1064,7 @@ def _impl_v1(cls, inputs, attr, params): return _op.nn.global_max_pool3d(inputs[0]) raise NotImplementedError( "Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD." - % (rank - 2), + % (rank - 2) ) @@ -1785,11 +1758,7 @@ def qmatmul_dequantize_bias( lhs, rhs_transposed, lhs_zero_point, rhs_zero_point, lhs_scale, rhs_scale ) # In our case zero point and scale are scalar, therefore 'axis' doesn't matter - result = _qnn.op.dequantize( - result, - _op.multiply(lhs_scale, rhs_scale), - zero_point_zero, - ) + result = _qnn.op.dequantize(result, _op.multiply(lhs_scale, rhs_scale), zero_point_zero) result = _op.add(result, bias) return result @@ -1845,7 +1814,9 @@ class Gemm(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - assert len(inputs) == 3 or len(inputs) == 2, f"Gemm op take 2 or 3 inputs, {len(inputs)} given" + assert ( + len(inputs) == 3 or len(inputs) == 2 + ), f"Gemm op take 2 or 3 inputs, {len(inputs)} given" input0_state = infer_type(inputs[0]) dtype = input0_state.checked_type.dtype # Y = alpha * A * B + beta * C @@ -2007,8 +1978,10 @@ def _impl_v1(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator LpPool ' - f'is invalid.') + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator LpPool ' + f"is invalid." + ) raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") @@ -2074,12 +2047,7 @@ def _impl_v1(cls, inputs, attr, params): "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.' ) - return AttrCvt( - _op.nn.pad, - transforms={ - "value": "pad_value", - }, - )(inputs, attr, params) + return AttrCvt(_op.nn.pad, transforms={"value": "pad_value"})(inputs, attr, params) @classmethod def _impl_v2(cls, inputs, attr, params): @@ -2098,12 +2066,7 @@ def _impl_v2(cls, inputs, attr, params): "Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.' 
) - return AttrCvt( - "pad", - transforms={ - "value": "pad_value", - }, - )(inputs, attr, params) + return AttrCvt("pad", transforms={"value": "pad_value"})(inputs, attr, params) @classmethod def _impl_v11(cls, inputs, attr, params): @@ -2453,12 +2416,7 @@ def _impl_v9(cls, inputs, attr, params): layout = "NCHW" out = _op.nn.upsampling( - inputs[0], - scale_h, - scale_w, - layout=layout, - method=method, - align_corners=False, + inputs[0], scale_h, scale_w, layout=layout, method=method, align_corners=False ) return out @@ -2810,12 +2768,7 @@ def _impl_common(cls, data, indices, batch_dims=0): indices_shape = infer_shape(indices) indices = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1))) index_rank = indices_shape[-1] - return _op.gather_nd( - data, - indices, - batch_dims=batch_dims, - index_rank=index_rank, - ) + return _op.gather_nd(data, indices, batch_dims=batch_dims, index_rank=index_rank) @classmethod def _impl_v1(cls, inputs, attr, params): @@ -2852,7 +2805,9 @@ class Scatter(OnnxOpConverter): @classmethod def _args_check(cls, inputs, attr): - assert len(inputs) == 3, f"Scatter takes 3 inputs (data, indices, updates), {len(inputs)} given" + assert ( + len(inputs) == 3 + ), f"Scatter takes 3 inputs (data, indices, updates), {len(inputs)} given" assert infer_type(inputs[1]).checked_type.dtype in ["int32", "int64"] data_rank = len(infer_shape(inputs[0])) @@ -2902,7 +2857,9 @@ def _args_check(cls, inputs, attr, red_valids=None): if reduction is None: reduction = b"update" reduction = reduction.decode("utf-8") - assert reduction in red_valids, f"Only {red_valids} modes are supported, but {reduction} is gotten" + assert ( + reduction in red_valids + ), f"Only {red_valids} modes are supported, but {reduction} is gotten" ret.append(reduction) return ret @@ -2953,7 +2910,9 @@ def _reduction_check(cls, attr, red_valids=None): reduction = reduction.decode("utf-8") if red_valids is None: red_valids = ["update"] - assert reduction in red_valids, f"Only {red_valids} reductions are supported, but {reduction} is gotten" + assert ( + reduction in red_valids + ), f"Only {red_valids} reductions are supported, but {reduction} is gotten" return reduction @@ -3560,31 +3519,11 @@ def expand_shape(in_shape, shape): if in_dims < new_dims: in_shape = _op.concatenate( - [ - _expr.const( - [ - 1, - ] - * (new_dims - in_dims), - dtype=dtype, - ), - in_shape, - ], - axis=0, + [_expr.const([1] * (new_dims - in_dims), dtype=dtype), in_shape], axis=0 ) elif new_dims < in_dims: shape = _op.concatenate( - [ - _expr.const( - [ - 1, - ] - * (in_dims - new_dims), - dtype=dtype, - ), - shape, - ], - axis=0, + [_expr.const([1] * (in_dims - new_dims), dtype=dtype), shape], axis=0 ) new_shape = _op.maximum(in_shape, shape) return new_shape @@ -3608,47 +3547,24 @@ def _activation_helper(cls, activation, alpha, beta): @classmethod def _activation_needs_alpha(cls, activation): - needs_alpha = [ - "Affine", - "LeakyRelu", - "ThresholdedRelu", - "ScaledTanh", - "HardSigmoid", - "Elu", - ] + needs_alpha = ["Affine", "LeakyRelu", "ThresholdedRelu", "ScaledTanh", "HardSigmoid", "Elu"] return activation.decode("utf-8") in needs_alpha @classmethod def _activation_needs_beta(cls, activation): - needs_beta = [ - "Affine", - "ScaledTanh", - "HardSigmoid", - ] + needs_beta = ["Affine", "ScaledTanh", "HardSigmoid"] return activation.decode("utf-8") in needs_beta @classmethod - def bidir_rnn_cell( - cls, - input_seqs, - weight_dicts, - acts, - ): + def bidir_rnn_cell(cls, input_seqs, weight_dicts, acts): """ Bidirectional 
RNN cell """ seq_len = len(input_seqs) - forward_outputs, fw_H_t = rnn_cell( - input_seqs, - **weight_dicts[0], - act=acts[0], - ) + forward_outputs, fw_H_t = rnn_cell(input_seqs, **weight_dicts[0], act=acts[0]) reverse_outputs, rev_H_t = rnn_cell( - input_seqs, - **weight_dicts[1], - act=acts[1], - backwards=True, + input_seqs, **weight_dicts[1], act=acts[1], backwards=True ) final_outputs = [] @@ -3657,10 +3573,7 @@ def bidir_rnn_cell( _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0) ) - return ( - _op.stack(final_outputs, axis=0), - _op.stack([fw_H_t, rev_H_t], axis=0), - ) + return (_op.stack(final_outputs, axis=0), _op.stack([fw_H_t, rev_H_t], axis=0)) @classmethod def _default_activations(cls, num_directions): @@ -3766,17 +3679,11 @@ def _impl_common(cls, inputs, attr, layout): if num_directions == 2: output, H = RNN.bidir_rnn_cell( - input_seqs=X_steps, - weight_dicts=weights_dicts, - acts=acts, + input_seqs=X_steps, weight_dicts=weights_dicts, acts=acts ) else: # outputs shape = [seqs_num, (batch_size, hidden_size)] - outputs, H = rnn_cell( - input_seqs=X_steps, - **weights_dicts[0], - act=acts[0], - ) + outputs, H = rnn_cell(input_seqs=X_steps, **weights_dicts[0], act=acts[0]) # output shape = (seqs_num, num_directions, batch_size, hidden_size) output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1) @@ -3801,22 +3708,13 @@ class LSTM(RNN): """Operator converter for LSTM""" @classmethod - def bidir_lstm_cell( - cls, - input_seqs, - weight_dicts, - acts, - ): + def bidir_lstm_cell(cls, input_seqs, weight_dicts, acts): """ Bidirectional LSTM cell """ seq_len = len(input_seqs) forward_outputs, fw_H_t, fw_C_t = lstm_cell( - input_seqs, - **weight_dicts[0], - f_act=acts[0], - g_act=acts[1], - h_act=acts[2], + input_seqs, **weight_dicts[0], f_act=acts[0], g_act=acts[1], h_act=acts[2] ) reverse_outputs, rev_H_t, rev_C_t = lstm_cell( @@ -3896,18 +3794,12 @@ def _impl_common(cls, inputs, attr, layout): if num_directions == 2: output, H, C = LSTM.bidir_lstm_cell( - input_seqs=X_steps, - weight_dicts=weights_dicts, - acts=acts, + input_seqs=X_steps, weight_dicts=weights_dicts, acts=acts ) else: # outputs shape = [seqs_num, (batch_size, hidden_size)] outputs, H, C = lstm_cell( - input_seqs=X_steps, - **weights_dicts[0], - f_act=acts[0], - g_act=acts[1], - h_act=acts[2], + input_seqs=X_steps, **weights_dicts[0], f_act=acts[0], g_act=acts[1], h_act=acts[2] ) # output shape = (seqs_num, num_directions, batch_size, hidden_size) @@ -3926,13 +3818,7 @@ class GRU(RNN): """Operator convert for GRU""" @classmethod - def bidir_gru_cell( - cls, - input_seqs, - weight_dicts, - acts, - sequence_lens=None, - ): + def bidir_gru_cell(cls, input_seqs, weight_dicts, acts, sequence_lens=None): """ Bidirectional GRU cell """ @@ -3960,10 +3846,7 @@ def bidir_gru_cell( _op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0) ) - return ( - _op.stack(final_outputs, axis=0), - _op.stack([fw_H_t, rev_H_t], axis=0), - ) + return (_op.stack(final_outputs, axis=0), _op.stack([fw_H_t, rev_H_t], axis=0)) @classmethod def _default_activations(cls, num_directions): @@ -4745,9 +4628,7 @@ def get_var(name, val, scan=False): scan_output_init.append(_op.zeros(shape, dtype)) # loop vars = [iter_count, scan_state, scan_out] - loop_vars = [ - _expr.var("iter", shape=(), dtype="int32"), # iteration count - ] + loop_vars = [_expr.var("iter", shape=(), dtype="int32")] # iteration count loop_vars += [ get_var(body.input[i].name, v) for i, v in enumerate(inputs) if i < num_state_inputs ] @@ 
-4787,9 +4668,7 @@ def body_fn(*loop_inputs): else: input_scan_exprs.append( relay.take( - inputs[i], - loop_count, - axis=scan_input_axes[i - num_state_inputs], + inputs[i], loop_count, axis=scan_input_axes[i - num_state_inputs] ) ) @@ -5271,8 +5150,10 @@ def _impl_v10(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' - f'is invalid.') + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f"is invalid." + ) raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") @@ -5368,15 +5249,7 @@ def _impl_v1(cls, inputs, attr, params): if not transB: b = _op.transpose(b, axes=(1, 0)) - result = _qnn.op.dense( - a, - b, - a_zp, - b_zp, - a_scale, - b_scale, - channels, - ) + result = _qnn.op.dense(a, b, a_zp, b_zp, a_scale, b_scale, channels) if C: result = _op.add(result, C) @@ -5625,20 +5498,9 @@ def _impl_v10(cls, inputs, attr, params): a_zp_dtype == a_dtype and b_zp_dtype == b_dtype ), "MatMulInteger: input dtype doesn't match zero point dtype" elif len(inputs) != 2: - raise AssertionError( - f"MatMulInteger op takes 2 or 4 inputs, {len(inputs)} given" - ) + raise AssertionError(f"MatMulInteger op takes 2 or 4 inputs, {len(inputs)} given") - inputs = [ - a, - a_scale, - a_zero_point, - b, - b_scale, - b_zero_point, - out_scale, - out_zero_point, - ] + inputs = [a, a_scale, a_zero_point, b, b_scale, b_zero_point, out_scale, out_zero_point] return QLinearMatMul.get_converter(10)(inputs, attr, params, expected_out_dtypes=["int32"]) @@ -5799,8 +5661,10 @@ def _impl_v10(cls, inputs, attr, params): elif attr["auto_pad"] == "NOTSET": pass else: - msg = (f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' - f'is invalid.') + msg = ( + f'Value {attr["auto_pad"]} in attribute "auto_pad" of operator Conv ' + f"is invalid." + ) raise tvm.error.OpAttributeInvalid(msg) attr.pop("auto_pad") @@ -5994,10 +5858,7 @@ class Bernoulli(OnnxOpConverter): @classmethod def _impl_v15(cls, inputs, attr, params): in_dtype = infer_type(inputs[0]).checked_type.dtype - assert in_dtype in [ - "float32", - "float64", - ], "Only float input tensor is currently supported." + assert in_dtype in ["float32", "float64"], "Only float input tensor is currently supported." # The data type for the elements of the output tensor. 
# if not specified, we will use the data type of the input tensor out_dtype = attr.get("dtype", None) @@ -6169,14 +6030,11 @@ def run_calculation( if weight_tensor is None: channels = infer_shape(input_tensor)[1] weight_tensor = relay.ones( - [channels], - dtype=infer_type(input_tensor).checked_type.dtype, + [channels], dtype=infer_type(input_tensor).checked_type.dtype ) loss = -relay.gather( - input_tensor, - axis=1, - indices=relay.expand_dims(normalized_target_tensor, 1), + input_tensor, axis=1, indices=relay.expand_dims(normalized_target_tensor, 1) ) loss = relay.squeeze(loss, axis=[1]) @@ -6222,10 +6080,7 @@ def _impl_v13(cls, inputs, attr, params): weight_tensor = None loss, weight_total = cls.run_calculation( - input_tensor, - target_tensor, - weight_tensor=weight_tensor, - ignore_index=ignore_index, + input_tensor, target_tensor, weight_tensor=weight_tensor, ignore_index=ignore_index ) if reduction == "mean": return relay.sum(loss) / weight_total @@ -6253,10 +6108,7 @@ def _impl_v13(cls, inputs, attr, params): log_softmax_tensor = LogSoftmax.get_converter(13)([input_tensor], log_softmax_attr, None) loss, weight_total = NegativeLogLikelihoodLoss.run_calculation( - log_softmax_tensor, - target_tensor, - weight_tensor, - ignore_index=ignore_index, + log_softmax_tensor, target_tensor, weight_tensor, ignore_index=ignore_index ) if reduction == "mean": @@ -7225,7 +7077,7 @@ def export_model(location, graph): os.makedirs(location) time_stamp = datetime.datetime.now().strftime("%m_%d_%Y_%H_%M_%S") model = helper.make_model(graph) - save(model, os.path.join(location, f"tvm_exported_model_{time_stamp}.onnx") + save(model, os.path.join(location, f"tvm_exported_model_{time_stamp}.onnx")) def from_onnx( diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py index 4aa06388fb73..fdbc96676f8e 100755 --- a/python/tvm/relay/frontend/paddlepaddle.py +++ b/python/tvm/relay/frontend/paddlepaddle.py @@ -91,11 +91,7 @@ def convert_unary_op(g, op, block): """Operator converter for all the unary operators.""" # op_map stores mapping relationship between paddlepaddle and relay - op_map = { - "isinf_v2": _op.isinf, - "isfinite_v2": _op.isfinite, - "isnan_v2": _op.isnan, - } + op_map = {"isinf_v2": _op.isinf, "isfinite_v2": _op.isfinite, "isnan_v2": _op.isnan} if op.type in op_map: unary_func = op_map[op.type] else: @@ -1456,10 +1452,7 @@ def convert_pool2d(g, op, block): input_x = g.get_node(op.input("X")[0]) _, _, in_h, in_w = infer_shape(input_x) - op_map = { - "avg": "avg_pool2d", - "max": "max_pool2d", - } + op_map = {"avg": "avg_pool2d", "max": "max_pool2d"} strides = op.attr("strides") if isinstance(strides, int): @@ -2129,10 +2122,7 @@ def convert_slice(g, op, block): if len(axes) < dims: if isinstance(strides, _expr.Expr): strides = _op.scatter_elements( - _expr.const( - np.array([1] * dims), - dtype=infer_type(strides).checked_type.dtype, - ), + _expr.const(np.array([1] * dims), dtype=infer_type(strides).checked_type.dtype), indices, strides, axis=0, @@ -2670,8 +2660,10 @@ def check_input_shape(self, op, block): ipt_shape = block.var(ipt_name).shape for i in ipt_shape: if i < 0: - warning_msg = (f"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. " - f"Specifying static values may improve performance") + warning_msg = ( + f"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. 
" + f"Specifying static values may improve performance" + ) warnings.warn(warning_msg) def check_unsupported_ops(self, program): diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py index 328f075725bc..078178a7e095 100644 --- a/python/tvm/relay/frontend/pytorch.py +++ b/python/tvm/relay/frontend/pytorch.py @@ -388,7 +388,7 @@ def _get_type(val, inp_type): stop = _get_value(inputs[1 if len(inputs) > 5 else 0], dtype) step = _get_value(inputs[2], dtype) if len(inputs) > 6 else _expr.const(1, dtype) else: - msg = f"Unknown number of arguments ({len(inputs}) to parse." + msg = f"Unknown number of arguments ({len(inputs)}) to parse." raise AssertionError(msg) return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype) @@ -585,8 +585,10 @@ def tensor_split(self, inputs, input_types): import torch if not isinstance(inputs[1], (int, list, tuple, torch.Tensor)): - msg = (f"indices_or_sections type {type(inputs[1])} could not be parsed in " - f"tensor_split op") + msg = ( + f"indices_or_sections type {type(inputs[1])} could not be parsed in " + f"tensor_split op" + ) raise AssertionError(msg) if isinstance(inputs[1], torch.Tensor) and not ( @@ -2151,13 +2153,7 @@ def to(self, inputs, input_types): # special handling for aten::to(data, 6, _, _, _) case # 6 means dtype = float # this happens when converting upsampling with scale factor - cast_map = { - 5: "float16", - 6: "float32", - 7: "float64", - 3: "int32", - 4: "int64", - } + cast_map = {5: "float16", 6: "float32", 7: "float64", 3: "int32", 4: "int64"} cast_func = {5: float, 6: float, 7: float, 3: int, 4: int} @@ -2666,12 +2662,7 @@ def index_put(self, inputs, input_types): def scalar_tensor(self, inputs, input_types): data = inputs[0] - cast_map = { - 6: "float32", - 7: "float64", - 3: "int32", - 4: "int64", - } + cast_map = {6: "float32", 7: "float64", 3: "int32", 4: "int64"} type_key = inputs[1] if isinstance(data, _expr.Constant): data = data.data.numpy().tolist() @@ -2848,7 +2839,9 @@ def scatter_reduce(self, inputs, input_types): ), "Index dim size should be less than data one" red_valids = ["sum", "prod", "mean", "amax", "amin"] - assert reduce in red_valids, f"Only {red_valids} modes are supported, but {reduce} is gotten" + assert ( + reduce in red_valids + ), f"Only {red_valids} modes are supported, but {reduce} is gotten" if reduce == "sum": reduce = "add" elif reduce == "prod": @@ -3115,11 +3108,7 @@ def rnn(self, inputs, input_types, nonlinearity): len(layer_weights_dicts) == num_layers and k == num_layers ), "For stacked RNN number of weights sets should be the same as number of layers!" 
output, out_hidden_state = self.rnn_layers( - X, - layer_weights_dicts, - bidirectional, - act, - dropout_p=dropout_p, + X, layer_weights_dicts, bidirectional, act, dropout_p=dropout_p ) # output shape = (seq_num, batch, hidden_size) or @@ -3129,25 +3118,14 @@ def rnn(self, inputs, input_types, nonlinearity): return (output, out_hidden_state) - def bidir_gru_cell( - self, - input_seqs, - weights_dicts, - ): + def bidir_gru_cell(self, input_seqs, weights_dicts): """ Bidirectional GRU cell """ seq_len = len(input_seqs) - forward_outputs, fw_H_t = gru_cell( - input_seqs, - **weights_dicts[0], - ) + forward_outputs, fw_H_t = gru_cell(input_seqs, **weights_dicts[0]) - reverse_outputs, rev_H_t = gru_cell( - input_seqs, - **weights_dicts[1], - backwards=True, - ) + reverse_outputs, rev_H_t = gru_cell(input_seqs, **weights_dicts[1], backwards=True) final_outputs = [] for i in range(seq_len): @@ -3302,10 +3280,7 @@ def gru(self, inputs, input_types): ), "For stacked GRU number of weights sets should be the same as number of layers!" output, out_hidden_state = self.gru_layers( - X, - layer_weights_dicts, - bidirectional, - dropout_p=dropout_p, + X, layer_weights_dicts, bidirectional, dropout_p=dropout_p ) # output shape = (seq_num, batch, hidden_size) or @@ -3315,24 +3290,15 @@ def gru(self, inputs, input_types): return (output, out_hidden_state) - def bidir_lstm_cell( - self, - input_seqs, - weights_dicts, - ): + def bidir_lstm_cell(self, input_seqs, weights_dicts): """ Bidirectional LSTM cell """ seq_len = len(input_seqs) - forward_outputs, fw_H_t, fw_C_t = lstm_cell( - input_seqs, - **weights_dicts[0], - ) + forward_outputs, fw_H_t, fw_C_t = lstm_cell(input_seqs, **weights_dicts[0]) reverse_outputs, rev_H_t, rev_C_t = lstm_cell( - input_seqs, - **weights_dicts[1], - backwards=True, + input_seqs, **weights_dicts[1], backwards=True ) final_outputs = [] @@ -3549,11 +3515,7 @@ def lstm(self, inputs, input_types): ), "For stacked LSTM number of weights sets should be the same as number of layers!" outputs = self.lstm_layers( - X, - layer_weights_dicts, - bidirectional, - dtype=X_dtype, - dropout_p=dropout_p, + X, layer_weights_dicts, bidirectional, dtype=X_dtype, dropout_p=dropout_p ) # output shape = (seq_num, batch, hidden_size) or @@ -3635,9 +3597,7 @@ def slide_axes(inp, shape, ax): # First fill in the last axis with roll indices, and then do transpose to # bring the roll indices into the desired axis. 
indices = slide_axes( - _op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)), - shape, - dim, + _op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)), shape, dim ) out = _op.gather(out, dim, indices) @@ -4049,10 +4009,7 @@ def report_missing_conversion(self, op_names): def convert_block(self, block, outputs): """Translate Torch "Block", used for prim::If and prim::Loop""" ops = _get_operator_nodes( - block.nodes(), - self.source_map, - self.op_type_dict, - self.use_parser_friendly_name, + block.nodes(), self.source_map, self.op_type_dict, self.use_parser_friendly_name ) ret_names = _get_input_names(block.returnNode()) return self.convert_operators(ops, outputs, ret_names) @@ -4215,8 +4172,7 @@ def convert_operators(self, operators, outputs, ret_names): outputs[node_name] = _get_constant(op_node) elif operator == "prim::ListConstruct" and _should_construct_dynamic_list(op_node): outputs[node_name] = set_span( - self.convert_to_list_adt(inputs), - self.source_map[op_node], + self.convert_to_list_adt(inputs), self.source_map[op_node] ) elif operator == "prim::ListConstruct": # This assumes that no more elements will be appended to this list @@ -4235,8 +4191,7 @@ def _handel_nested_input(inputs): return _expr.Tuple(inputs_list) outputs[node_name] = set_span( - _handel_nested_input(inputs), - self.source_map[op_node], + _handel_nested_input(inputs), self.source_map[op_node] ) elif operator in ["prim::ListUnpack", "prim::TupleUnpack"]: assert len(inputs) == 1 @@ -4624,12 +4579,7 @@ def _traverse_graph(nodes): return source_map -def _get_operator_nodes( - nodes, - source_map=None, - op_type_dict=None, - use_parser_friendly_name=False, -): +def _get_operator_nodes(nodes, source_map=None, op_type_dict=None, use_parser_friendly_name=False): """Returns torch IR nodes that need conversion to Relay""" ops, should_rename_graph = [], all([source_map, op_type_dict]) is not None @@ -4835,10 +4785,7 @@ def convert_params(graph, state_dict, source_map, use_parser_friendly_name=False # set variable name by concatenating first consumer's name with full attribute # e.g. 
"aten::batch_norm_5.running_mean" var_name = attr_name_sep.join( - [ - source_map[_get_users(getattrs[-1])[0]], - full_attr.split(attr_name_sep)[-1], - ] + [source_map[_get_users(getattrs[-1])[0]], full_attr.split(attr_name_sep)[-1]] ) if full_attr.endswith("_packed_params"): # for quantized models @@ -5011,10 +4958,7 @@ def from_pytorch( converter.update_convert_map(qnn_torch.convert_map) operator_nodes = _get_operator_nodes( - graph.nodes(), - converter.source_map, - converter.op_type_dict, - use_parser_friendly_name, + graph.nodes(), converter.source_map, converter.op_type_dict, use_parser_friendly_name ) ret_name = _get_input_names(graph.return_node()) outputs = converter.convert_operators(operator_nodes, outputs, ret_name) diff --git a/python/tvm/relay/frontend/qnn_torch.py b/python/tvm/relay/frontend/qnn_torch.py index f5628a5919bc..13e426ccd7a0 100644 --- a/python/tvm/relay/frontend/qnn_torch.py +++ b/python/tvm/relay/frontend/qnn_torch.py @@ -46,16 +46,7 @@ class ConvPackedParam(QNNParam): """ def __init__( - self, - weight_np, - bias, - scale, - zero_point, - stride, - padding, - dilation, - groups, - output_padding, + self, weight_np, bias, scale, zero_point, stride, padding, dilation, groups, output_padding ): super().__init__(weight_np, bias, scale, zero_point) self.stride = stride @@ -95,15 +86,7 @@ def make_conv_packed_param(qweight, bias, packed_params): groups = packed_params.groups() output_padding = packed_params.output_padding() return ConvPackedParam( - weight_np, - bias, - scale, - zero_point, - stride, - padding, - dilation, - groups, - output_padding, + weight_np, bias, scale, zero_point, stride, padding, dilation, groups, output_padding ) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index d17b6a01132f..e12542f8e276 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -732,9 +732,7 @@ def _parse_param(self, key, value, name, shape): ] else: if key not in ("dtype", "_output_shapes", "_class"): - raise NotImplementedError( - f"Other attributes for a Const(param) Node {key} ? ." - ) + raise NotImplementedError(f"Other attributes for a Const(param) Node {key} ? .") def _get_attr(self, buf): """Returns the value of the attr of this buf with the given `name`. 
diff --git a/python/tvm/relay/frontend/tensorflow2.py b/python/tvm/relay/frontend/tensorflow2.py index c4bc90367874..e6ad1f7805af 100644 --- a/python/tvm/relay/frontend/tensorflow2.py +++ b/python/tvm/relay/frontend/tensorflow2.py @@ -50,9 +50,7 @@ # A map to record tensor list write ops and input tl/tensor indices # Value is (index of tensor list, index of written node) -_tensor_list_write_ops = { - "TensorListSetItem": (0, 2), -} +_tensor_list_write_ops = {"TensorListSetItem": (0, 2)} def _infer_type_with_prelude(val, prelude): @@ -453,12 +451,7 @@ def _convert_operator(self, graph, op_name, node_name, inputs, attrs): """ if op_name in ["PartitionedCall", "StatefulPartitionedCall"]: sym = _partition_call_operator( - self._module, - graph, - inputs, - attrs, - self._prelude, - gdef_lib=self._gdef_lib, + self._module, graph, inputs, attrs, self._prelude, gdef_lib=self._gdef_lib ) elif op_name in ["StatelessIf", "If"]: sym = _convert_if( @@ -649,8 +642,7 @@ def convert_vars(loop_inputs, input_signature): return new_vars while_func = next( - (f for f in graph.library.function if f.signature.name == attr["body"].name), - None, + (f for f in graph.library.function if f.signature.name == attr["body"].name), None ) loop_inputs = convert_vars(inputs, while_func.signature.input_arg) @@ -722,10 +714,7 @@ def _convert_function( @func___inference_add_95(%x) """ - func = next( - (f for f in graph.library.function if f.signature.name == node_func_name), - None, - ) + func = next((f for f in graph.library.function if f.signature.name == node_func_name), None) if func is None: raise Exception(f"Function not found - {node_func_name}") devices = set(node.device for node in func.node_def) diff --git a/python/tvm/relay/frontend/tensorflow_ops.py b/python/tvm/relay/frontend/tensorflow_ops.py index b1a92222887e..014d0065fce1 100644 --- a/python/tvm/relay/frontend/tensorflow_ops.py +++ b/python/tvm/relay/frontend/tensorflow_ops.py @@ -167,9 +167,7 @@ def _impl(inputs, attr, params, mod): # support the case where it inputs from a scalar constant. axis_input_value = [_get_num_param(params, inputs[1])] except (IndexError, KeyError): - raise TypeError( - f"Unsupported argument for `{func_name}` : `axis` should be a constant" - ) + raise TypeError(f"Unsupported argument for `{func_name}` : `axis` should be a constant") out = func(inputs[0], axis=axis_input_value, keepdims=False) dtype = attr["output_type"].name if dtype != "int32": @@ -201,8 +199,10 @@ def _impl(inputs, attr, params, mod): attr["kernel_shape"] = (attr["ksize"][2], attr["ksize"][3], attr["ksize"][4]) attr["strides"] = (attr["strides"][2], attr["strides"][3], attr["strides"][4]) else: - msg = (f'Value {attr["data_format"]} of attribute "data_format" of operator Pooling ' - f"is not valid.") + msg = ( + f'Value {attr["data_format"]} of attribute "data_format" of operator Pooling ' + f"is not valid." + ) raise tvm.error.OpAttributeInvalid(msg) if attr["data_format"] == "NDHWC": input_shape = [_infer_shape(inputs[0], mod)[i] for i in (0, 4, 1, 2, 3)] @@ -231,8 +231,10 @@ def _impl(inputs, attr, params, mod): attr["padding"] = [pad_d[0], pad_v[0], pad_h[0], pad_d[1], pad_v[1], pad_h[1]] else: - msg = (f'Value {attr["padding"]} in attribute "padding" of operator Pooling is ' - f"not valid.") + msg = ( + f'Value {attr["padding"]} in attribute "padding" of operator Pooling is ' + f"not valid." 
+ ) raise tvm.error.OpAttributeInvalid(msg) if name == "avg_pool": @@ -265,8 +267,10 @@ def _impl(inputs, attr, params, mod): attr["kernel_shape"] = (attr["ksize"][2], attr["ksize"][3]) attr["strides"] = (attr["strides"][2], attr["strides"][3]) else: - msg = (f'Value {attr["data_format"]} of attribute "data_format" of operator Pooling ' - f"is not valid.") + msg = ( + f'Value {attr["data_format"]} of attribute "data_format" of operator Pooling ' + f"is not valid." + ) raise tvm.error.OpAttributeInvalid(msg) if attr["_target_layout"] == "NCHW" and attr["data_format"] == "NHWC": @@ -303,8 +307,10 @@ def _impl(inputs, attr, params, mod): else: attr["padding"] = [paddings[4], paddings[6], paddings[5], paddings[7]] else: - msg = (f'Value {attr["padding"]} in attribute "padding" of operator Pooling is ' - f"not valid.") + msg = ( + f'Value {attr["padding"]} in attribute "padding" of operator Pooling is ' + f"not valid." + ) raise tvm.error.OpAttributeInvalid(msg) if name == "avg_pool": @@ -414,8 +420,10 @@ def _impl(inputs, attr, params, mod): attr["dilations"] = (attr["dilations"][2], attr["dilations"][3]) attr["strides"] = (attr["strides"][2], attr["strides"][3]) else: - msg = (f'Value {attr["data_format"]} in attribute "data_format" of operator Conv is ' - f"not valid.") + msg = ( + f'Value {attr["data_format"]} in attribute "data_format" of operator Conv is ' + f"not valid." + ) raise tvm.error.OpAttributeInvalid(msg) if opname == "depthwise": @@ -462,8 +470,9 @@ def _impl(inputs, attr, params, mod): else: attr["padding"] = [paddings[4], paddings[6], paddings[5], paddings[7]] else: - msg = (f'Value {attr["padding"]} in attribute "padding" of operator Conv is not ' - f"valid.") + msg = ( + f'Value {attr["padding"]} in attribute "padding" of operator Conv is not ' f"valid." + ) raise tvm.error.OpAttributeInvalid(msg) if "kernel_layout" not in attr: @@ -521,8 +530,10 @@ def _impl(inputs, attr, params, mod): attr["dilations"] = (attr["dilations"][1], attr["dilations"][2]) attr["strides"] = (attr["strides"][1], attr["strides"][2]) else: - msg = (f'Value {attr["data_format"]} in attribute "data_format" of operator Dilation2D is ' - f"not valid.") + msg = ( + f'Value {attr["data_format"]} in attribute "data_format" of operator Dilation2D is ' + f"not valid." + ) raise tvm.error.OpAttributeInvalid(msg) attr["padding"] = attr["padding"].decode("utf-8") @@ -562,17 +573,17 @@ def _impl(inputs, attr, params, mod): attr["padding"] = [0, 0] else: - msg = (f'Value {attr["padding"]} in attribute "padding" of operator Dilation2d is not ' - f"valid.") + msg = ( + f'Value {attr["padding"]} in attribute "padding" of operator Dilation2d is not ' + f"valid." + ) raise tvm.error.OpAttributeInvalid(msg) attr["kernel_layout"] = "HWI" if attr["data_format"] == "NHWC" else "IHW" out = AttrCvt( op_name="dilation2d", ignores=["explicit_paddings", "rates"], - transforms={ - "data_format": "data_layout", - }, + transforms={"data_format": "data_layout"}, )([inputs[0], inputs[1]], attr) if attr["_target_layout"] == "NCHW": out = _op.transpose(out, axes=(0, 2, 3, 1)) @@ -639,8 +650,10 @@ def _impl(inputs, attr, params, mod): ) attr["strides"] = (attr["strides"][2], attr["strides"][3], attr["strides"][4]) else: - msg = (f'Value {attr["data_format"]} in attribute "data_format" of operator Conv is ' - f"not valid.") + msg = ( + f'Value {attr["data_format"]} in attribute "data_format" of operator Conv is ' + f"not valid." 
+ ) raise tvm.error.OpAttributeInvalid(msg) # Fix padding @@ -698,8 +711,9 @@ def _impl(inputs, attr, params, mod): paddings[9], ] else: - msg = (f'Value {attr["padding"]} in attribute "padding" of operator Conv is not ' - f"valid.") + msg = ( + f'Value {attr["padding"]} in attribute "padding" of operator Conv is not ' f"valid." + ) raise tvm.error.OpAttributeInvalid(msg) if "kernel_layout" not in attr: @@ -826,7 +840,7 @@ def convert_combined_nms_with_all_class_nms( clip_boxes, ): """Converts TF combined_nms using Relay all_class_max_suppression op""" - (selected_indices, selected_scores, num_detections,) = _op.vision.all_class_non_max_suppression( + (selected_indices, selected_scores, num_detections) = _op.vision.all_class_non_max_suppression( boxes, scores, max_output_boxes_per_class, @@ -1156,11 +1170,7 @@ def _impl(inputs, attr, params, mod): extras={"units": channels}, ignores=["transpose_a", "transpose_b", "T"], )(inputs, attr) - return AttrCvt( - op_name="matmul", - extras={"units": channels}, - ignores=["T"], - )(inputs, attr) + return AttrCvt(op_name="matmul", extras={"units": channels}, ignores=["T"])(inputs, attr) return _impl @@ -1336,8 +1346,7 @@ def _impl(inputs, attr, params, mod): ) return _expr.TupleWrapper( - _expr.Tuple([new_sparse_indices, new_sparse_values, empty_row_indicator]), - 3, + _expr.Tuple([new_sparse_indices, new_sparse_values, empty_row_indicator]), 3 ) return _impl @@ -1685,7 +1694,9 @@ def _impl(inputs, attr, params, prelude): write_func = prelude.get_global_var("tensor_array_write", dtype_str) else: input_ta_rank = len(input_ta_shape) - assert input_ta_rank == input_rank, f"Shape rank mismatch: {input_ta_rank} vs {input_rank}" + assert ( + input_ta_rank == input_rank + ), f"Shape rank mismatch: {input_ta_rank} vs {input_rank}" static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape) static_tensor_array_ops.register() tensor_func = static_tensor_array_ops.get_ctor("tensor_constructor") @@ -1749,7 +1760,9 @@ def _impl(inputs, attr, params, prelude): split_func = prelude.get_global_var("tensor_array_split", dtype_str) else: input_ta_rank = len(input_ta_shape) - assert input_ta_rank == input_rank, f"Shape rank mismatch: {input_ta_rank} vs {input_rank}" + assert ( + input_ta_rank == input_rank + ), f"Shape rank mismatch: {input_ta_rank} vs {input_rank}" static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape) static_tensor_array_ops.register() @@ -1931,7 +1944,9 @@ def _impl(inputs, attr, params, mod): elif s0[s0_size - i] == 1: out.appendleft(s1[s1_size - i]) else: - assert s1[s1_size - i] == 1, f"Incompatible broadcast type {0[s0_size - i] and {s1[s1_size - i]}" + assert ( + s1[s1_size - i] == 1 + ), f"Incompatible broadcast type {s0[s0_size - i]} and {s1[s1_size - i]}" out.appendleft(s0[s0_size - i]) if s0_size < s1_size: for i in range(s0_size + 1, s1_size + 1): @@ -2351,10 +2366,7 @@ def _impl(inputs, attr, params, mod): attr["pad_value"] = _get_num_param(params, inputs[2]) except (IndexError, KeyError, AttributeError): attr["pad_value"] = inputs[2] - return AttrCvt( - op_name="pad", - ignores=["Tpaddings"], - )(new_inputs, attr) + return AttrCvt(op_name="pad", ignores=["Tpaddings"])(new_inputs, attr) return _impl @@ -2367,10 +2379,7 @@ def _impl(inputs, attr, params, mod): mode = attr["mode"].decode("utf-8") attr["mode"] = mode new_inputs = [inputs[0]] - return AttrCvt( - op_name="mirror_pad", - ignores=["Tpaddings"], - )(new_inputs, attr) + return AttrCvt(op_name="mirror_pad",
ignores=["Tpaddings"])(new_inputs, attr) return _impl @@ -2877,17 +2886,13 @@ def _impl(inputs, attr, params, mod): unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size") counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size") return _expr.TupleWrapper( - _expr.Tuple([unique_sliced, inverse_indices, counts_sliced]), - 3, + _expr.Tuple([unique_sliced, inverse_indices, counts_sliced]), 3 ) [unique, _, inverse_indices, num_uniq] = _op.unique( data, is_sorted=False, return_counts=False ) unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size") - return _expr.TupleWrapper( - _expr.Tuple([unique_sliced, inverse_indices]), - 2, - ) + return _expr.TupleWrapper(_expr.Tuple([unique_sliced, inverse_indices]), 2) return _impl diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py index 7bc5d70bc9b7..f5d9b5bbf29a 100644 --- a/python/tvm/relay/frontend/tflite.py +++ b/python/tvm/relay/frontend/tflite.py @@ -219,8 +219,8 @@ def check_unsupported_ops(self): ops = str(list(dynamic_range_ops_set)).strip("[,]") raise_msg += ( f"The following operators are likely to have dynamic range quantization: {ops}. " - f"If you are running an optimized graph, please turn off dynamic range quantization " - f"or use full integer quantization" + f"If you are running an optimized graph, please turn off dynamic range " + f"quantization or use full integer quantization" ) if len(raise_msg) > 0: @@ -413,9 +413,7 @@ def get_tensors(self, tensors_idx_list): # has zero scale and zero zero point. This is not used. is_qnn_params_valid = False else: - raise NotImplementedError( - f"Quantized type {type(tflite_scale)} not supported" - ) + raise NotImplementedError(f"Quantized type {type(tflite_scale)} not supported") # Check that the scale and zero points are valid. if is_qnn_params_valid: @@ -490,9 +488,7 @@ def get_tensor_type_str(self, tensor_type): return "int64" if tensor_type == TensorType.BOOL: return "bool" - raise NotImplementedError( - f"Tensor type {str(tensor_type)} is currently not supported" - ) + raise NotImplementedError(f"Tensor type {str(tensor_type)} is currently not supported") def flatten_to_nd(self, x, x_shape, nd=3): """Flatten input tensor to nd rank""" @@ -1314,13 +1310,7 @@ def convert_square(self, op): return out - def _convert_elemwise( - self, - relay_op, - op, - ignore_qnn_params=False, - comparison_op=False, - ): + def _convert_elemwise(self, relay_op, op, ignore_qnn_params=False, comparison_op=False): """Generic method to Convert TFLite elemwise""" try: from tflite.AddOptions import AddOptions @@ -3045,22 +3035,10 @@ def convert_batch_matmul(self, op): ) a_broadcasted_shape = _fold_constant( - _op.concatenate( - [ - out_batch, - _op.strided_slice(shape_a, [rank_a - 2], [rank_a]), - ], - 0, - ) + _op.concatenate([out_batch, _op.strided_slice(shape_a, [rank_a - 2], [rank_a])], 0) ) b_broadcasted_shape = _fold_constant( - _op.concatenate( - [ - out_batch, - _op.strided_slice(shape_b, [rank_b - 2], [rank_b]), - ], - 0, - ) + _op.concatenate([out_batch, _op.strided_slice(shape_b, [rank_b - 2], [rank_b])], 0) ) if not tvm.ir.structural_equal(shape_a, a_broadcasted_shape): input_a = _op.transform.broadcast_to(a, a_broadcasted_shape)