18 changes: 18 additions & 0 deletions python/tvm/relay/op/contrib/ethosn.py
@@ -233,6 +233,16 @@ def qnn_add_pattern():

        return input_is_left | input_is_right | two_inputs

    def qnn_conv2d_transpose_pattern():
        pattern = is_op("qnn.conv2d_transpose")(
            wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
        ).has_attr({"data_layout": "NHWC"})
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = is_op("qnn.requantize")(
            pattern, is_constant(), is_constant(), is_constant(), is_constant()
        )
        return pattern

    def check_conv2d(extract):
        """Check if a conv2d is supported by Ethos-N."""
        if not ethosn_available():
@@ -261,6 +271,13 @@ def check_mean(extract):

        return _ethosn.mean(extract)

    def check_conv2d_transpose(extract):
        """Check if conv2d_transpose is supported by Ethos-N."""
        if not ethosn_available():
            return False

        return _ethosn.conv2d_transpose(extract)

    def check_sigmoid(extract):
        """Check if a sigmoid is supported by Ethos-N."""
        if not ethosn_available():
@@ -326,6 +343,7 @@ def check_add(extract):
("ethos-n.qnn_mul", qnn_mul_pattern(), check_mul),
("ethos-n.qnn_add", qnn_add_pattern(), check_add),
("ethos-n.qnn_conv2d", qnn_conv_pattern(), check_conv2d),
("ethos-n.qnn_conv2d_transpose", qnn_conv2d_transpose_pattern(), check_conv2d_transpose),
("ethos-n.qnn_avg_pool2d", qnn_avg_pool2d_pattern(), check_avg_pool2d),
("ethos-n.qnn_sigmoid", qnn_sigmoid_pattern(), check_sigmoid),
("ethos-n.qnn_fc", qnn_fc_pattern(), check_fc),
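For context, here is a minimal sketch (not part of the diff) of a Relay graph that the new `qnn_conv2d_transpose_pattern` would match: a `qnn.conv2d_transpose` in NHWC layout, an optional `nn.bias_add`, then a `qnn.requantize`. All shapes and quantization parameters below are made up for illustration, and the final step assumes the `partition_for_ethosn` helper defined in this same file.

```python
import numpy as np
import tvm
from tvm import relay

# Illustrative values only -- none of these come from the PR.
data = relay.var("data", shape=(1, 8, 8, 16), dtype="uint8")   # NHWC input
weights = relay.const(np.zeros((16, 4, 2, 2), dtype="uint8"))  # IOHW kernel

conv = relay.qnn.op.conv2d_transpose(
    data, weights,
    input_zero_point=relay.const(128, "int32"),
    kernel_zero_point=relay.const(128, "int32"),
    input_scale=relay.const(0.5, "float32"),
    kernel_scale=relay.const(0.25, "float32"),
    channels=4, kernel_size=(2, 2), strides=(2, 2),
    data_layout="NHWC", kernel_layout="IOHW", out_dtype="int32",
)
# qnn.requantize closes the pattern; its input scale is the product of the
# input and kernel scales (0.5 * 0.25), following the usual QNN convention.
out = relay.qnn.op.requantize(
    conv,
    input_scale=relay.const(0.125, "float32"),
    input_zero_point=relay.const(0, "int32"),
    output_scale=relay.const(0.25, "float32"),
    output_zero_point=relay.const(128, "int32"),
    out_dtype="uint8",
)
mod = tvm.IRModule.from_expr(relay.Function([data], out))

# With an Ethos-N enabled build, partitioning should now extract this graph
# as an "ethos-n.qnn_conv2d_transpose" composite function.
from tvm.relay.op.contrib.ethosn import partition_for_ethosn
mod = partition_for_ethosn(mod)
```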
39 changes: 39 additions & 0 deletions src/relay/backend/contrib/ethosn/codegen.cc
@@ -125,6 +125,10 @@ void InferTensorsVisitor::InferCall(const CallNode* cn) {
    LeakyReLUParams params;
    err += EthosnAPI::LeakyReLU(cn->op.as<FunctionNode>()->body, &params);
    tensor_table_[cn->args[0]] = {params.input_info};
  } else if (IsEthosnFunc(call, "ethos-n.qnn_conv2d_transpose")) {
    QnnConv2dTransposeParams params;
    err += EthosnAPI::QnnConv2dTranspose(cn->op.as<FunctionNode>()->body, &params);
    tensor_table_[cn->args[0]] = {params.input_info};
  } else if (IsEthosnOp(call, "qnn.concatenate")) {
    ConcatenateParams params;
    err = EthosnAPI::Concatenate(call, &params);
@@ -311,6 +315,9 @@ sl::TensorsAndId ConstructNetworkVisitor::HandleCall(const CallNode* cn) {
  } else if (IsEthosnFunc(call, "ethos-n.qnn_leaky_relu")) {
    if ((err = MakeLeakyReLULayer(call, &tensor))) ReportFatalError(call, err);
    return MakeOps(tensor);
  } else if (IsEthosnFunc(call, "ethos-n.qnn_conv2d_transpose")) {
    if ((err = MakeConv2DTransposeLayer(call, &tensor))) ReportFatalError(call, err);
    return MakeOps(tensor);
  } else if (IsEthosnOp(call, "qnn.concatenate")) {
    if ((err = MakeConcatenateLayer(call, &tensor))) ReportFatalError(call, err);
    return MakeOps(tensor);
@@ -537,6 +544,24 @@ EthosnError ConstructNetworkVisitor::MakeLeakyReLULayer(const Call& call,
  return EthosnError();
}

EthosnError ConstructNetworkVisitor::MakeConv2DTransposeLayer(const Call& call,
                                                              sl::TensorAndId<sl::Operand>* out) {
  QnnConv2dTransposeParams params;
  if (auto err = EthosnAPI::QnnConv2dTranspose(call->op.as<FunctionNode>()->body, &params)) {
    return err;
  }

  auto activation = operand_table_[call->args[0]][0];
  auto weights = AddConstant(network_, params.weights_info, params.raw_weights->data).tensor;
  auto bias = AddConstant(network_, params.bias_info, params.raw_bias->data).tensor;
  try {
    *out = AddTransposeConvolution(network_, *activation, *bias, *weights, params.conv_info);
  } catch (const sl::NotSupportedException& e) {
    return EthosnError(e.what());
  }
  return EthosnError();
}

EthosnError ConstructNetworkVisitor::MakeConcatenateLayer(const Call& call,
                                                          sl::TensorAndId<sl::Operand>* out) {
  ConcatenateParams params;
@@ -913,6 +938,20 @@ TVM_REGISTER_GLOBAL("relay.ethos-n.support.leaky_relu")
      err += EthosnError(reason);
    });

TVM_REGISTER_GLOBAL("relay.ethos-n.support.conv2d_transpose")
.set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
Call call = args[0];
QnnConv2dTransposeParams params;
auto err = EthosnAPI::QnnConv2dTranspose(call, &params);
err += EthosnCompiler::SupportedSetup();
char reason[kReasonMaxLength];
reason[0] = '\0';
*rv = !err && EthosnCompiler::GetSupported()->IsTransposeConvolutionSupported(
params.bias_info, params.weights_info, params.conv_info, params.input_info,
&params.output_info, reason, sizeof(reason));
err += EthosnError(reason);
});

TVM_REGISTER_GLOBAL("relay.ethos-n.support.concatenate")
.set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
Call call = args[0];
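The `relay.ethos-n.support.conv2d_transpose` global registered above is what the Python-side `check_conv2d_transpose` reaches through `_ethosn.conv2d_transpose`. As a hedged sketch, it can also be probed directly over the FFI; `composite_call` here is a hypothetical stand-in for an extracted `ethos-n.qnn_conv2d_transpose` function, not an API from the PR.

```python
import tvm

def ethosn_supports_conv2d_transpose(composite_call):
    """Ask the Support Library whether the extracted composite is offloadable.

    `composite_call` is assumed to be the partitioned
    "ethos-n.qnn_conv2d_transpose" Relay call; this mirrors what
    check_conv2d_transpose does via _ethosn.conv2d_transpose.
    """
    fn = tvm.get_global_func("relay.ethos-n.support.conv2d_transpose", allow_missing=True)
    if fn is None:  # TVM was built without the Ethos-N backend
        return False
    return bool(fn(composite_call))
```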
1 change: 1 addition & 0 deletions src/relay/backend/contrib/ethosn/codegen_ethosn.h
@@ -206,6 +206,7 @@ class ConstructNetworkVisitor : public MixedModeVisitor, private ErrorReportingP
  EthosnError MakeSigmoidLayer(const Call& call, sl::TensorAndId<sl::Operand>* out);
  EthosnError MakeMeanLayer(const Call& call, sl::TensorAndId<sl::Operand>* out);
  EthosnError MakeTanhLayer(const Call& call, sl::TensorAndId<sl::Operand>* out);
  EthosnError MakeConv2DTransposeLayer(const Call& call, sl::TensorAndId<sl::Operand>* out);
  EthosnError MakeConcatenateLayer(const Call& call, sl::TensorAndId<sl::Operand>* out);
  EthosnError MakeSplitLayer(const Call& call, sl::TensorsAndId* outs);
  EthosnError MakeDepthToSpaceLayer(const Call& call, sl::TensorAndId<sl::Operand>* out);
15 changes: 1 addition & 14 deletions src/relay/backend/contrib/ethosn/convert_equivalent.cc
@@ -32,26 +32,13 @@
#include "../../../qnn/utils.h"
#include "../../../transforms/pattern_utils.h"
#include "../../../transforms/simplify_expr.h"
#include "ethosn_api.h"

namespace tvm {
namespace relay {
namespace contrib {
namespace ethosn {

/*!
 * \brief Apply constant folding on an expression.
 *
 * \param expr The expression to fold.
 * \param fold_qnn Whether to fold constants for QNN operations.
 * \returns The new folded expression.
 */
Expr FoldConstantExpr(const Expr& expr, bool fold_qnn = true) {
  auto mod = IRModule::FromExpr(expr);
  mod = transform::FoldConstant(fold_qnn)(mod);
  auto entry_func = Downcast<Function>(mod->Lookup("main"));
  return expr.as<FunctionNode>() == nullptr ? entry_func->body : entry_func;
}

/*!
 * \brief Converts qnn.mul to mathematically equivalent
 * qnn.conv2d depthwise operation.
126 changes: 126 additions & 0 deletions src/relay/backend/contrib/ethosn/ethosn_api.cc
@@ -23,6 +23,7 @@

#include "ethosn_api.h"

#include <tvm/relay/analysis.h>
#include <tvm/relay/attrs/image.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/expr.h>
@@ -37,6 +38,9 @@
#include <utility>
#include <vector>

#include "../../../op/make_op.h"
#include "../../../transforms/pattern_utils.h"
#include "../../../transforms/simplify_expr.h"
#include "ethosn_support_library/Support.hpp"
#include "ethosn_support_library/SupportQueries.hpp"
#include "tvm/relay/qnn/attrs.h"
@@ -445,6 +449,121 @@ EthosnError EthosnAPI::Mean(const Expr& expr, MeanParams* params) {
  return err;
}

Constant TransposeWeights(const Constant& data, const std::string& input_layout) {
  int pos_h = input_layout.find("H");
  int pos_w = input_layout.find("W");
  int pos_i = input_layout.find("I");
  int pos_o = input_layout.find("O");

  // Currently the expected target layout is HWIO only.
  Array<Integer> target_shape = {pos_h, pos_w, pos_i, pos_o};

  Expr transpose = MakeTranspose(data, target_shape);
  transpose = InferType(FoldConstantExpr(transpose));
  Constant transposed_data = Downcast<Constant>(transpose);
  return transposed_data;
}

EthosnError EthosnAPI::QnnConv2dTranspose(const Expr& expr, QnnConv2dTransposeParams* params) {
  Call requantize = Downcast<Call>(expr);
  Call bias;
  Call conv2d_transpose;
  if (requantize->args[0]->IsInstance<CallNode>() &&
      Downcast<Call>(requantize->args[0])->op == Op::Get("nn.bias_add")) {
    bias = Downcast<Call>(requantize->args[0]);
    conv2d_transpose = Downcast<Call>(bias->args[0]);
  } else {
    conv2d_transpose = Downcast<Call>(requantize->args[0]);
  }
  const auto& conv_attr = conv2d_transpose->attrs.as<Conv2DTransposeAttrs>();
  ICHECK(conv_attr) << "Expected type Conv2DTransposeAttrs but was "
                    << conv2d_transpose->attrs->GetTypeKey();

  int input_zero_point;
  int kernel_zero_point;
  int output_zero_point;
  std::valarray<float> input_scale;
  std::valarray<float> kernel_scale;
  float output_scale;
  unsigned int qaxis = conv_attr->kernel_layout.find("O");

  EthosnError err = AsConstant(conv2d_transpose->args[2], &input_zero_point);
  err += AsConstant(conv2d_transpose->args[3], &kernel_zero_point);
  err += AsConstant(requantize->args[4], &output_zero_point);
  err += AsConstant(conv2d_transpose->args[4], &input_scale);
  err += AsConstant(conv2d_transpose->args[5], &kernel_scale);
  err += AsConstant(requantize->args[3], &output_scale);

  // Convert quantization params
  sl::QuantizationInfo input_q_info;
  sl::QuantizationInfo weights_q_info;
  sl::QuantizationInfo bias_q_info;
  sl::QuantizationInfo output_q_info;
  err += Tvm2Npu(input_zero_point, input_scale, qaxis, &input_q_info);
  err += Tvm2Npu(kernel_zero_point, kernel_scale, qaxis, &weights_q_info);
  std::valarray<float> bias_scales = input_q_info.GetScales() * weights_q_info.GetScales();
  err += Tvm2Npu(0, bias_scales, 3, &bias_q_info);
  err += Tvm2Npu(output_zero_point, output_scale, &output_q_info);

  // Convert convolution attributes
  sl::Padding padding;
  err += Tvm2Npu(conv_attr->padding, &padding);
  sl::Stride stride;
  err += Tvm2Npu(conv_attr->strides, &stride);
  // Dilation is not supported
  std::array<uint32_t, 2> dilation = {1, 1};
  AsArray(conv_attr->dilation, &dilation);
  if (conv_attr->dilation.size() != 2 || dilation[0] != 1 || dilation[1] != 1) {
    err +=
        EthosnError(ErrStrm() << "dilation=" << conv_attr->dilation << ", dilation must = [1, 1]");
  }

  // Create convolution info
  params->conv_info = sl::ConvolutionInfo(padding, stride, output_q_info);

  // Create input info
  sl::TensorInfo input_tensor_info;
  err += Tvm2Npu(conv2d_transpose->args[0]->checked_type(), &input_tensor_info);
  input_tensor_info.m_QuantizationInfo = input_q_info;
  params->input_info = input_tensor_info;

  // Create weights info
  Constant weights_data = Downcast<Constant>(conv2d_transpose->args[1]);
  if (conv_attr->kernel_layout != "HWIO") {
    weights_data = TransposeWeights(weights_data, conv_attr->kernel_layout);
  }
  const auto* weights_ttype = weights_data->checked_type().as<TensorTypeNode>();
  sl::TensorShape weights_tensor_shape;
  sl::DataType weights_data_type;
  sl::DataFormat weights_data_format;
  // Ignore the error here because weights don't have a batch axis
  Tvm2Npu(weights_ttype->shape, &weights_tensor_shape);
  err += Tvm2Npu(weights_ttype->dtype, &weights_data_type);
  err += Tvm2Npu("HWIO", &weights_data_format);
  params->weights_info =
      sl::TensorInfo(weights_tensor_shape, weights_data_type, weights_data_format, weights_q_info);

  params->raw_weights = weights_data->data;

  // Create bias info
  unsigned int out_channels = Downcast<IntImm>(conv_attr->channels)->value;
  params->bias_info = sl::TensorInfo({1, 1, 1, out_channels}, sl::DataType::INT32_QUANTIZED,
                                     sl::DataFormat::NHWC, bias_q_info);
  if (bias.defined()) {
    params->raw_bias = Downcast<Constant>(bias->args[1])->data;
  } else {
    params->raw_bias = MakeConstantZeros(tvm::DataType::Int(32), {1, 1, 1, out_channels})->data;
  }

  // Create output info
  sl::TensorInfo output_tensor_info;
  err += Tvm2Npu(requantize->checked_type(), &output_tensor_info);
  output_tensor_info.m_QuantizationInfo = output_q_info;
  params->output_info = output_tensor_info;

  return err;
}

EthosnError EthosnAPI::Tanh(const Expr& expr, TanhParams* params) {
  Call quantize = Downcast<Call>(expr);
  Call tanh = Downcast<Call>(quantize->args[0]);
@@ -925,6 +1044,13 @@ EthosnError EthosnAPI::AsConstant(const Expr& expr, T* out) {
  return EthosnError();
}

Expr FoldConstantExpr(const Expr& expr, bool fold_qnn) {
  auto mod = IRModule::FromExpr(expr);
  mod = transform::FoldConstant(fold_qnn)(mod);
  auto entry_func = Downcast<Function>(mod->Lookup("main"));
  return expr.as<FunctionNode>() == nullptr ? entry_func->body : entry_func;
}

} // namespace ethosn
} // namespace contrib
} // namespace relay
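Two details of `QnnConv2dTranspose` above are worth spelling out: the int32 bias is quantized at scale `input_scale * kernel_scale` per output channel with zero point 0, and when the composite contains no `nn.bias_add` a zero bias of shape `{1, 1, 1, out_channels}` is synthesized. A NumPy sketch of that arithmetic, with made-up scales:

```python
import numpy as np

# Made-up quantization parameters, for illustration only.
input_scale = 0.5
kernel_scales = np.array([0.25, 0.1, 0.05, 0.2], dtype="float32")  # per output channel

# Mirrors `bias_scales = input_q_info.GetScales() * weights_q_info.GetScales()`.
bias_scales = input_scale * kernel_scales

# A float bias would be quantized to int32 against those scales (zero point 0)...
bias_fp32 = np.array([0.75, -0.2, 0.1, 0.0], dtype="float32")
bias_int32 = np.round(bias_fp32 / bias_scales).astype("int32")

# ...and with no nn.bias_add in the composite, the code substitutes zeros with
# the same NHWC-style {1, 1, 1, out_channels} shape.
out_channels = kernel_scales.size
zero_bias = np.zeros((1, 1, 1, out_channels), dtype="int32")
```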
23 changes: 23 additions & 0 deletions src/relay/backend/contrib/ethosn/ethosn_api.h
@@ -24,6 +24,7 @@
#ifndef TVM_RELAY_BACKEND_CONTRIB_ETHOSN_ETHOSN_API_H_
#define TVM_RELAY_BACKEND_CONTRIB_ETHOSN_ETHOSN_API_H_

#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/transform.h>
@@ -115,6 +116,16 @@ struct LeakyReLUParams {
  sl::TensorInfo output_info;
};

struct QnnConv2dTransposeParams {
  sl::ConvolutionInfo conv_info;
  sl::TensorInfo input_info;
  sl::TensorInfo weights_info;
  sl::TensorInfo bias_info;
  sl::TensorInfo output_info;
  runtime::NDArray raw_weights;
  runtime::NDArray raw_bias;
};

struct ConcatenateParams {
  sl::QuantizationInfo qInfo;
  sl::ConcatenationInfo concat_info = sl::ConcatenationInfo(1, qInfo);
@@ -237,6 +248,9 @@ class EthosnAPI {
  static EthosnError Tanh(const Expr& expr, TanhParams* params);
  /*! \brief Extract the Support Library leaky relu params from an ethos-n leaky relu Relay call. */
  static EthosnError LeakyReLU(const Expr& expr, LeakyReLUParams* params);
  /*! \brief Extract the Support Library transpose convolution params from an
   * ethos-n.qnn_conv2d_transpose Relay function. */
  static EthosnError QnnConv2dTranspose(const Expr& expr, QnnConv2dTransposeParams* params);
  /*! \brief Extract the Support Library concatenate params from a Relay qnn.concatenate call */
  static EthosnError Concatenate(const Expr& expr, ConcatenateParams* params);
  /*! \brief Extract the Support Library split params from a Relay split call */
@@ -294,6 +308,15 @@ class EthosnAPI {
  static EthosnError AsConstant(const Expr& expr, std::valarray<float>* out);
};

/*!
 * \brief Apply constant folding on an expression.
 *
 * \param expr The expression to fold.
 * \param fold_qnn Whether to fold constants for QNN operations.
 * \returns The new folded expression.
 */
Expr FoldConstantExpr(const Expr& expr, bool fold_qnn = true);

} // namespace ethosn
} // namespace contrib
} // namespace relay
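Finally, the kernel-layout handling implemented by `TransposeWeights` (ethosn_api.cc above) reduces to reading each axis position out of the layout string and permuting the constant into HWIO. A NumPy equivalent, assuming Relay's default IOHW layout for `qnn.conv2d_transpose` weights:

```python
import numpy as np

def to_hwio(kernel, kernel_layout):
    """NumPy analogue of TransposeWeights: permute `kernel` into HWIO order.

    The permutation comes from each letter's position in `kernel_layout`,
    exactly as the C++ derives pos_h/pos_w/pos_i/pos_o with std::string::find.
    """
    axes = [kernel_layout.find(c) for c in "HWIO"]
    return np.transpose(kernel, axes)

# Example with an IOHW constant (illustrative shape: I=16, O=4, H=2, W=2).
kernel_iohw = np.zeros((16, 4, 2, 2), dtype="uint8")
kernel_hwio = to_hwio(kernel_iohw, "IOHW")
assert kernel_hwio.shape == (2, 2, 16, 4)  # H, W, I, O
```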