Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions include/tvm/attrs.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,22 @@ namespace tvm {
__fvisit__(#FieldName, &FieldName)


/*!
 * \brief Create a NodeRef type that represents null.
 * \tparam TNodeRef the type to be created.
 * \return An instance that will represent None.
 */
template<typename TNodeRef>
inline TNodeRef NullValue() {
  return TNodeRef(NodePtr<Node>(nullptr));
}

/*!
 * \brief Specialization for Type: a null Type cannot hold a null node,
 *        so None is encoded as a handle type with 0 bits and 0 lanes
 *        (the same "0 bits means none" convention used by the runtime
 *        type string conversion).
 * \return A Type instance that represents None.
 */
template<>
inline Type NullValue<Type>() {
  return Type(Type::Handle, 0, 0);
}


/*! \brief Error thrown during attribute checking. */
struct AttrError : public dmlc::Error {
/*!
Expand Down
2 changes: 1 addition & 1 deletion include/tvm/packed_func_ext.h
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ inline TNodeRef TVMArgValue::AsNodeRef() const {
static_assert(
std::is_base_of<NodeRef, TNodeRef>::value,
"Conversion only works for NodeRef");
if (type_code_ == kNull) return TNodeRef();
if (type_code_ == kNull) return TNodeRef(NodePtr<Node>(nullptr));
TVM_CHECK_TYPE_CODE(type_code_, kNodeHandle);
NodePtr<Node>& sptr = *ptr<NodePtr<Node> >();
CHECK(NodeTypeChecker<TNodeRef>::Check(sptr.get()))
Expand Down
72 changes: 72 additions & 0 deletions include/tvm/relay/attrs/nn.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
/*!
* Copyright (c) 2018 by Contributors
* \file tvm/relay/attrs/nn.h
* \brief Auxiliary attributes for nn operators.
*/
#ifndef TVM_RELAY_ATTRS_NN_H_
#define TVM_RELAY_ATTRS_NN_H_

#include <tvm/attrs.h>
#include <string>

namespace tvm {
namespace relay {

/*! \brief Attributes used in convolution operators */
struct ConvAttrs : public tvm::AttrsNode<ConvAttrs> {
  /*! \brief Stride of the convolution along each spatial axis. */
  Array<IndexExpr> strides;
  /*! \brief Implicit zero padding on both sides of the input. */
  Array<IndexExpr> padding;
  /*! \brief Dilation rate of the convolution kernel. */
  Array<IndexExpr> dilation;
  /*! \brief Number of groups for grouped convolution. */
  int groups;
  /*! \brief Number of output channels; null means infer from weight shape. */
  IndexExpr channels;
  /*! \brief Spatial dimensions of the convolution window. */
  Array<IndexExpr> kernel_size;
  /*! \brief Layout of the input data. */
  std::string data_layout;
  /*! \brief Layout of the weight. */
  std::string weight_layout;
  /*! \brief Layout of the output; "__undef__" means same as input layout. */
  std::string out_layout;
  /*! \brief Output dtype; 0 bits means unset (same as input dtype). */
  DataType out_dtype;

  TVM_DECLARE_ATTRS(ConvAttrs, "relay.attrs.ConvAttrs") {
    // NOTE: adjacent string literals concatenate; each continuation line
    // must carry an explicit separating space.
    TVM_ATTR_FIELD(strides).set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding).set_default(Array<IndexExpr>({0, 0}))
        .describe("If padding is non-zero, then the input is implicitly zero-padded "
                  "on both sides for padding number of points");
    TVM_ATTR_FIELD(dilation).set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1)
        .describe("Controls the connections between inputs and outputs. "
                  "At groups=1, all inputs are convolved to all outputs. "
                  "At groups=2, the operation becomes equivalent to having two convolution "
                  "layers side by side, each seeing half the input channels, and producing "
                  "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe("The number of output channels in the convolution. "
                  "If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr> >());
    TVM_ATTR_FIELD(data_layout).set_default("NCHW")
        .describe("Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width "
                  "dimensions respectively. Convolution is applied on the 'H' and "
                  "'W' dimensions.");
    TVM_ATTR_FIELD(weight_layout).set_default("OIHW")
        .describe("Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc. "
                  "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width "
                  "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout).set_default("__undef__")
        .describe("Dimension ordering of output. Can be 'NCHW', 'NHWC', etc. "
                  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width "
                  "dimensions respectively. Default to be same as input layout.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(Int(0))
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_ATTRS_NN_H_
2 changes: 1 addition & 1 deletion include/tvm/relay/base.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ using DataType = ::tvm::Type;
/*!
* \brief Symbolic expression for tensor shape.
*/
using ShapeExpr = ::tvm::Expr;
using IndexExpr = ::tvm::Expr;

/*!
* \brief Hash function for nodes.
Expand Down
4 changes: 3 additions & 1 deletion include/tvm/relay/expr.h
Original file line number Diff line number Diff line change
Expand Up @@ -286,7 +286,9 @@ class CallNode : public ExprNode {
v->Visit("_checked_type_", &checked_type_);
}

TVM_DLL static Call make(Expr op, Array<Expr> args, Attrs attrs = Attrs(),
TVM_DLL static Call make(Expr op,
Array<Expr> args,
Attrs attrs = Attrs(),
Array<Type> ty_args = Array<Type>());

static constexpr const char* _type_key = "relay.Call";
Expand Down
10 changes: 6 additions & 4 deletions include/tvm/relay/type.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,9 +70,9 @@ class TensorTypeNode : public BaseTensorTypeNode {
public:
/*!
* \brief The shape of the tensor,
* represented by ShapeExpr(tvm::Expr).
* represented by IndexExpr(tvm::Expr).
*/
Array<ShapeExpr> shape;
Array<IndexExpr> shape;
/*! \brief The content data type */
DataType dtype;

Expand All @@ -82,7 +82,7 @@ class TensorTypeNode : public BaseTensorTypeNode {
v->Visit("span", &span);
}

TVM_DLL static TensorType make(Array<ShapeExpr> shape, DataType dtype);
TVM_DLL static TensorType make(Array<IndexExpr> shape, DataType dtype);

/*! \brief Construct an scalar containing elements of dtype. */
TVM_DLL static TensorType Scalar(DataType dtype);
Expand Down Expand Up @@ -273,8 +273,10 @@ class TypeReporterNode : public Node {
* \brief assert shape expression equals each other.
* \param lhs The left operand.
* \param rhs The right operand.
* \return false if assertion can be proven to have failed
* true if solver can still proceed.
*/
TVM_DLL virtual void AssertEQ(const ShapeExpr& lhs, const ShapeExpr& rhs) = 0;
TVM_DLL virtual bool AssertEQ(const IndexExpr& lhs, const IndexExpr& rhs) = 0;

// solver is not serializable.
void VisitAttrs(tvm::AttrVisitor* v) final {}
Expand Down
12 changes: 12 additions & 0 deletions include/tvm/runtime/packed_func.h
Original file line number Diff line number Diff line change
Expand Up @@ -521,6 +521,12 @@ class TVMArgValue : public TVMPODValue_ {
if (type_code_ == kStr) {
return String2TVMType(operator std::string());
}
// None type
if (type_code_ == kNull) {
TVMType t;
t.code = kHandle; t.bits = 0; t.lanes = 0;
return t;
}
TVM_CHECK_TYPE_CODE(type_code_, kTVMType);
return value_.v_type;
}
Expand Down Expand Up @@ -878,6 +884,7 @@ inline std::ostream& operator<<(std::ostream& os, TVMType t) { // NOLINT(*)
#endif

inline std::string TVMType2String(TVMType t) {
if (t.bits == 0) return "";
#ifndef _LIBCPP_SGX_NO_IOSTREAMS
std::ostringstream os;
os << t;
Expand All @@ -896,6 +903,11 @@ inline std::string TVMType2String(TVMType t) {

inline TVMType String2TVMType(std::string s) {
TVMType t;
// handle None type
if (s.length() == 0) {
t.bits = 0; t.lanes = 0; t.code = kHandle;
return t;
}
t.bits = 32; t.lanes = 1;
const char* scan;
if (s.substr(0, 3) == "int") {
Expand Down
1 change: 1 addition & 0 deletions python/tvm/relay/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
# Operators
from .op import Op
from .op.tensor import *
from .op import nn

# Span
Span = base.Span
Expand Down
6 changes: 4 additions & 2 deletions python/tvm/relay/env.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,19 @@ class Environment(NodeBase):
options and more.
"""

def __init__(self, funcs):
def __init__(self, funcs=None):
    """Construct an environment.

    Parameters
    ----------
    funcs : dict, optional
        Map of global var to Function.
    """
    # Default to an empty function map when none is supplied; avoids a
    # mutable default argument.
    funcs = funcs if funcs else {}
    self.__init_handle_by_constructor__(_make.Environment, funcs)

def add(self, var, func):
Expand Down
22 changes: 19 additions & 3 deletions python/tvm/relay/ir_pass.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,26 @@
them in Python.
"""
from . import _ir_pass

# Expose checking expression, should rename to infer_type.
# pylint: disable=invalid-name
check_expr = _ir_pass.check_expr

def infer_type(env, expr):
    """Infer the type of expr under the context of env.

    Parameters
    ----------
    env : relay.Environment
        The global environment.

    expr : relay.Expr
        The input expression.

    Returns
    -------
    checked_expr : relay.Expr
        The checked expression.
    """
    return _ir_pass.infer_type(env, expr)


well_formed = _ir_pass.well_formed

Expand Down
2 changes: 2 additions & 0 deletions python/tvm/relay/op/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@

# Operators
from .tensor import *
from . import nn


# operator registry
from . import _tensor
Expand Down
54 changes: 54 additions & 0 deletions python/tvm/relay/op/nn.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
"""Neural network operations."""
from __future__ import absolute_import as _abs
from . import _make


def conv2d(data,
           weight,
           strides=(1, 1),
           padding=(0, 0),
           dilation=(1, 1),
           groups=1,
           channels=None,
           kernel_size=None,
           data_layout="NCHW",
           weight_layout="OIHW",
           out_layout="",
           out_dtype=""):
    """Two dimensional convolution operator.

    Parameters
    ----------
    data : relay.Expr
        The input data to the operator.

    weight : relay.Expr
        The weight expressions.

    strides : tuple of int, optional
        The strides of convolution.

    padding : tuple of int, optional
        The padding of convolution on both sides of inputs.

    dilation : tuple of int, optional
        Specifies the dilation rate to be used for dilated convolution.

    groups : int, optional
        Number of groups for grouped convolution.

    channels : int, optional
        Number of output channels of the convolution.
        If not set, it is inferred from the shape of the weight.

    kernel_size : tuple of int, optional
        The spatial dimensions of the convolution window.
        If not set, inferred from the weight shape.

    data_layout : str, optional
        Layout of the input data, e.g. "NCHW", "NHWC".

    weight_layout : str, optional
        Layout of the weight, e.g. "OIHW".

    out_layout : str, optional
        Layout of the output. Defaults to the input layout when empty.

    out_dtype : str, optional
        Specifies the output data type for mixed precision conv2d.

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.conv2d(data, weight, strides, padding, dilation,
                        groups, channels, kernel_size, data_layout,
                        weight_layout, out_layout, out_dtype)
3 changes: 3 additions & 0 deletions src/op/compute_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,9 @@ Operation ComputeOpNode::make(std::string name,
Map<std::string, NodeRef> attrs,
Array<IterVar> axis,
Array<Expr> body) {
if (!attrs.defined()) {
attrs = Map<std::string, NodeRef>();
}
auto n = make_node<ComputeOpNode>();
n->name = std::move(name);
n->tag = std::move(tag);
Expand Down
3 changes: 3 additions & 0 deletions src/op/extern_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,9 @@ Operation ExternOpNode::make(std::string name,
Array<Buffer> input_placeholders,
Array<Buffer> output_placeholders,
Stmt body) {
if (!attrs.defined()) {
attrs = Map<std::string, NodeRef>();
}
auto n = make_node<ExternOpNode>();
n->name = std::move(name);
n->tag = std::move(tag);
Expand Down
3 changes: 3 additions & 0 deletions src/op/scan_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,9 @@ Operation ScanOpNode::make(std::string name,
Array<Tensor> update,
Array<Tensor> state_placeholder,
Array<Tensor> inputs) {
if (!attrs.defined()) {
attrs = Map<std::string, NodeRef>();
}
auto n = make_node<ScanOpNode>();
CHECK_EQ(init.size(), update.size());
CHECK_EQ(init.size(), state_placeholder.size());
Expand Down
13 changes: 13 additions & 0 deletions src/pass/ir_deep_compare.cc
Original file line number Diff line number Diff line change
Expand Up @@ -418,6 +418,19 @@ bool Equal(const Stmt& lhs, const Stmt& rhs) {
}

/*!
 * \brief Structural equality of two expressions.
 *
 *  Two undefined (null) expressions compare equal; an undefined
 *  expression never equals a defined one. Constant integers are
 *  compared by value without a deep traversal.
 */
bool Equal(const Expr& lhs, const Expr& rhs) {
  // quick pass for constant expressions.
  if (const int64_t* a = as_const_int(lhs)) {
    if (const int64_t* b = as_const_int(rhs)) {
      return a[0] == b[0];
    }
  }
  // null handling: equal iff both are undefined.
  // (The original spelled this as four branches, one of which was
  //  unreachable; the condition below is equivalent.)
  if (!lhs.defined() || !rhs.defined()) {
    return lhs.defined() == rhs.defined();
  }
  // deep comparison.
  return IRDeepCompare().Equal(lhs, rhs);
}

Expand Down
4 changes: 2 additions & 2 deletions src/relay/ir/type.cc
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ namespace relay {
using tvm::IRPrinter;
using namespace tvm::runtime;

TensorType TensorTypeNode::make(Array<ShapeExpr> shape, DataType dtype) {
TensorType TensorTypeNode::make(Array<IndexExpr> shape, DataType dtype) {
NodePtr<TensorTypeNode> n = make_node<TensorTypeNode>();
n->shape = std::move(shape);
n->dtype = std::move(dtype);
Expand All @@ -24,7 +24,7 @@ TensorType TensorTypeNode::Scalar(DataType dtype) {

// Expose TensorTypeNode::make to the frontend as relay._make.TensorType.
// args[0]: shape (Array<IndexExpr>), args[1]: dtype.
TVM_REGISTER_API("relay._make.TensorType")
.set_body([](TVMArgs args, TVMRetValue *ret) {
    Array<IndexExpr> shape = args[0];
    *ret = TensorTypeNode::make(shape, args[1]);
  });

Expand Down
Loading