This repository was archived by the owner on Nov 17, 2023. It is now read-only.
Closed
34 commits
0f7bcf8
[numpy] Shape support scalar tensor (#14315)
reminisce Mar 6, 2019
fc44472
[Numpy] Change semantics of ndim for operators in `src/operator/contr…
junrushao Mar 15, 2019
f659034
[WIP] Use new shape definition (#14453)
reminisce Mar 18, 2019
19434dd
[numpy] Fix unit tests after introducing numpy compatible shapes (#14…
reminisce Mar 22, 2019
29bfafc
Fix a bug to pass the test in test_contrib_rnn (#14520)
zheng-da Mar 26, 2019
15fc6ce
[numpy] Fix test_dynamic_shape.test_dynamic_shape (#14538)
junrushao Mar 27, 2019
8604087
[numpy] Fix numpy import in python2 (#14537)
reminisce Mar 27, 2019
6d0a391
fix concat and slice (#14549)
TaoLv Mar 29, 2019
6b8158a
fix R-package (#14536)
hetong007 Apr 3, 2019
0f0331d
Fix cpp package build after using new shape definition (#14554)
reminisce Apr 3, 2019
62bc5f7
Fix pooling_v1 and deformable_convolution param initialization (#14577)
reminisce Apr 4, 2019
f4e57d1
[Numpy] Misc fix (#14612)
junrushao Apr 4, 2019
58beb06
[Numpy] fix test_operator_gpu.test_upsampling_bilinear_with_type (#14…
junrushao Apr 4, 2019
dc7cf99
[Numpy] Java/Scala modification (#14625)
yzhliu Apr 5, 2019
7568b54
fix shape index bug (#14630)
eric-haibin-lin Apr 5, 2019
052936f
fix jni lint (#14634)
yzhliu Apr 5, 2019
10d2150
Remove numpy namespaces for operator registration
reminisce Apr 4, 2019
4c2d34b
Fix bug when shape is completely unknown
reminisce Apr 4, 2019
8dfd965
Fix signed/unsigned compare warning
reminisce Apr 4, 2019
439e177
Fix CI
reminisce Apr 4, 2019
51713e8
Fix pylint
reminisce Apr 4, 2019
6a13bca
Avoid launching gpu kernels for zero-size output tensors
reminisce Apr 5, 2019
d8fbba3
Fix test_ndarray
reminisce Apr 5, 2019
9c4e208
Fix binary broadcast with zero-size tensors
reminisce Apr 5, 2019
5a96f97
Better error message for infer shape failure in imperative
reminisce Apr 5, 2019
a5ac531
Fix TShape constructor ambiguity on certain platforms
reminisce Apr 6, 2019
9af65b0
Fix mkldnn build failure
reminisce Apr 6, 2019
bdcfd1a
Fix build failure in gpu and cpp test
reminisce Apr 6, 2019
ff1ac1e
Fix gpu cpp test build with mkldnn
reminisce Apr 6, 2019
719e7dc
Fix mkldnn cpp test
reminisce Apr 6, 2019
d621688
Fix concatenating zero-size tensors
reminisce Apr 7, 2019
b4cc471
Avoid letting mkldnn handle zero-size tensors in concat
reminisce Apr 7, 2019
8663de3
Fix quantized_concat infer shape
reminisce Apr 7, 2019
9e42544
Try to fix perl c api
reminisce Apr 7, 2019
4 changes: 2 additions & 2 deletions R-package/src/ndarray.cc
@@ -179,8 +179,8 @@ Rcpp::RObject NDArrayPacker::CreateNDArrayPacker() {
 }

 Rcpp::Dimension NDArray::dim() const {
-  mx_uint ndim;
-  const mx_uint *pshape;
+  int ndim;
+  const int *pshape;
   MX_CALL(MXNDArrayGetShape(
       ptr_->handle, &ndim, &pshape));
   Rcpp::IntegerVector dat(pshape, pshape + ndim);
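The switch from mx_uint to int is visible to every C-API caller: a negative value can now encode an unknown dimension. Below is a minimal sketch of reading a shape under the new signature; the early-return error handling and the treatment of ndim < 0 as "unknown rank" are our assumptions, not part of this diff.

// Sketch: read an NDArray shape through the updated C API. The buffers
// are signed now because -1 denotes an unknown dimension under the
// numpy-compatible shape definition.
#include <mxnet/c_api.h>
#include <vector>

std::vector<int> GetShapeOrEmpty(NDArrayHandle handle) {
  int ndim = 0;
  const int* pshape = nullptr;
  if (MXNDArrayGetShape(handle, &ndim, &pshape) != 0 || ndim < 0) {
    return {};  // API failure, or the rank itself is unknown
  }
  return std::vector<int>(pshape, pshape + ndim);
}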
18 changes: 9 additions & 9 deletions R-package/src/symbol.cc
@@ -167,8 +167,8 @@ Symbol::RObjectType Symbol::GetOutput(mx_uint index) const {

 // helper function to convert shape into Rcpp vector
 inline Rcpp::List BuildShapeData(mx_uint shape_size,
-                                 const mx_uint *shape_ndim,
-                                 const mx_uint **shape_data,
+                                 const int *shape_ndim,
+                                 const int **shape_data,
                                  const std::vector<std::string> &names) {
   Rcpp::List ret(shape_size);
   for (mx_uint i = 0; i < shape_size; ++i) {
@@ -185,7 +185,7 @@ SEXP Symbol::InferShape(const Rcpp::List& kwargs) const {
       << "Need to pass parameters in key=value style.\n";
   std::vector<std::string> keys = kwargs.names();
   std::vector<mx_uint> arg_ind_ptr(1, 0);
-  std::vector<mx_uint> arg_shape_data;
+  std::vector<int> arg_shape_data;

   for (size_t i = 0; i < kwargs.size(); ++i) {
     RCHECK(keys[i].length() != 0)
@@ -197,14 +197,14 @@ SEXP Symbol::InferShape(const Rcpp::List& kwargs) const {
   std::vector<const char*> c_keys = CKeys(keys);

   mx_uint in_shape_size;
-  const mx_uint *in_shape_ndim;
-  const mx_uint **in_shape_data;
+  const int *in_shape_ndim;
+  const int **in_shape_data;
   mx_uint out_shape_size;
-  const mx_uint *out_shape_ndim;
-  const mx_uint **out_shape_data;
+  const int *out_shape_ndim;
+  const int **out_shape_data;
   mx_uint aux_shape_size;
-  const mx_uint *aux_shape_ndim;
-  const mx_uint **aux_shape_data;
+  const int *aux_shape_ndim;
+  const int **aux_shape_data;
   int complete;

   MX_CALL(MXSymbolInferShape(
6 changes: 3 additions & 3 deletions cpp-package/include/mxnet-cpp/ndarray.hpp
@@ -397,11 +397,11 @@ inline size_t NDArray::Size() const {
 }

 inline std::vector<mx_uint> NDArray::GetShape() const {
-  const mx_uint *out_pdata;
-  mx_uint out_dim;
+  const int *out_pdata;
+  int out_dim;
   MXNDArrayGetShape(blob_ptr_->handle_, &out_dim, &out_pdata);
   std::vector<mx_uint> ret;
-  for (mx_uint i = 0; i < out_dim; ++i) {
+  for (int i = 0; i < out_dim; ++i) {
     ret.push_back(out_pdata[i]);
   }
   return ret;
20 changes: 10 additions & 10 deletions cpp-package/include/mxnet-cpp/symbol.hpp
@@ -188,7 +188,7 @@ inline void Symbol::InferShape(

   std::vector<const char *> keys;
   std::vector<mx_uint> arg_ind_ptr;
-  std::vector<mx_uint> arg_shape_data;
+  std::vector<int> arg_shape_data;

   for (const auto &arg : arg_shapes) {
     keys.push_back(arg.first.c_str());
@@ -200,14 +200,14 @@ inline void Symbol::InferShape(
     arg_ind_ptr.push_back(arg_shape_data.size());

   mx_uint in_shape_size;
-  const mx_uint *in_shape_ndim;
-  const mx_uint **in_shape_data;
+  const int *in_shape_ndim;
+  const int **in_shape_data;
   mx_uint out_shape_size;
-  const mx_uint *out_shape_ndim;
-  const mx_uint **out_shape_data;
+  const int *out_shape_ndim;
+  const int **out_shape_data;
   mx_uint aux_shape_size;
-  const mx_uint *aux_shape_ndim;
-  const mx_uint **aux_shape_data;
+  const int *aux_shape_ndim;
+  const int **aux_shape_data;
   int complete;

   CHECK_EQ(MXSymbolInferShape(GetHandle(), keys.size(), keys.data(),
@@ -221,19 +221,19 @@ inline void Symbol::InferShape(
   if (complete) {
     for (mx_uint i = 0; i < in_shape_size; ++i) {
       in_shape->push_back(std::vector<mx_uint>());
-      for (mx_uint j = 0; j < in_shape_ndim[i]; ++j) {
+      for (int j = 0; j < in_shape_ndim[i]; ++j) {
         (*in_shape)[i].push_back(in_shape_data[i][j]);
       }
     }
     for (mx_uint i = 0; i < aux_shape_size; ++i) {
       aux_shape->push_back(std::vector<mx_uint>());
-      for (mx_uint j = 0; j < aux_shape_ndim[i]; ++j) {
+      for (int j = 0; j < aux_shape_ndim[i]; ++j) {
         (*aux_shape)[i].push_back(aux_shape_data[i][j]);
       }
     }
     for (mx_uint i = 0; i < out_shape_size; ++i) {
       out_shape->push_back(std::vector<mx_uint>());
-      for (mx_uint j = 0; j < out_shape_ndim[i]; ++j) {
+      for (int j = 0; j < out_shape_ndim[i]; ++j) {
         (*out_shape)[i].push_back(out_shape_data[i][j]);
       }
     }
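Callers that pack shape buffers by hand must switch to signed ints as well. The following is a hedged sketch of the CSR-style packing that Symbol::InferShape performs above; PackShapes is a hypothetical helper name, and the initial 0 offset mirrors the R-package variant of the same loop.

// Sketch: flatten named shapes into the buffers MXSymbolInferShape expects.
// arg_ind_ptr[k]..arg_ind_ptr[k+1] delimits the k-th argument's dimensions.
#include <mxnet/c_api.h>  // for mx_uint
#include <map>
#include <string>
#include <vector>

void PackShapes(const std::map<std::string, std::vector<int>>& arg_shapes,
                std::vector<const char*>* keys,
                std::vector<mx_uint>* arg_ind_ptr,
                std::vector<int>* arg_shape_data) {
  arg_ind_ptr->push_back(0);
  for (const auto& arg : arg_shapes) {
    keys->push_back(arg.first.c_str());
    for (int dim : arg.second) {
      arg_shape_data->push_back(dim);  // -1 marks an unknown dimension
    }
    arg_ind_ptr->push_back(static_cast<mx_uint>(arg_shape_data->size()));
  }
}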
55 changes: 34 additions & 21 deletions include/mxnet/c_api.h
@@ -170,7 +170,7 @@ typedef int (*CustomOpFBFunc)(int /*size*/, void** /*ptrs*/, int* /*tags*/,
 typedef int (*CustomOpDelFunc)(void* /*state*/);
 typedef int (*CustomOpListFunc)(char*** /*args*/, void* /*state*/);
 typedef int (*CustomOpInferShapeFunc)(int /*num_input*/, int* /*ndims*/,
-                                      unsigned** /*shapes*/, void* /*state*/);
+                                      int** /*shapes*/, void* /*state*/);
 typedef int (*CustomOpInferStorageTypeFunc)(int /*num_input*/, int* /*stypes*/, void* /*state*/);
 typedef int (*CustomOpBackwardInferStorageTypeFunc)(int /*num_input*/,
                                                     int * /*stypes*/,
@@ -763,8 +763,8 @@ MXNET_DLL int MXNDArrayReshape64(NDArrayHandle handle,
  * \return 0 when success, -1 when failure happens
  */
 MXNET_DLL int MXNDArrayGetShape(NDArrayHandle handle,
-                                mx_uint *out_dim,
-                                const mx_uint **out_pdata);
+                                int *out_dim,
+                                const int **out_pdata);
 /*!
  * \brief get the content of the data in NDArray
  * \param handle the handle to the ndarray
@@ -1036,6 +1036,19 @@ MXNET_DLL int MXAutogradIsRecording(bool* curr);
  * \return 0 when success, -1 when failure happens
  */
 MXNET_DLL int MXAutogradIsTraining(bool* curr);
+/*!
+ * \brief get whether numpy compatibility is on
+ * \param curr returns the current status
+ * \return 0 when success, -1 when failure happens
+ */
+MXNET_DLL int MXIsNumpyCompatible(bool* curr);
+/*!
+ * \brief set numpy compatibility switch
+ * \param is_np_comp 1 when numpy compatibility is on, 0 when off
+ * \param prev returns the previous status before this set
+ * \return 0 when success, -1 when failure happens
+ */
+MXNET_DLL int MXSetIsNumpyCompatible(int is_np_comp, int* prev);
 /*!
  * \brief mark NDArrays as variables to compute gradient for autograd
  * \param num_var number of variable NDArrays
@@ -1481,16 +1494,16 @@ MXNET_DLL int MXSymbolInferShape(SymbolHandle sym,
                                  mx_uint num_args,
                                  const char** keys,
                                  const mx_uint *arg_ind_ptr,
-                                 const mx_uint *arg_shape_data,
+                                 const int *arg_shape_data,
                                  mx_uint *in_shape_size,
-                                 const mx_uint **in_shape_ndim,
-                                 const mx_uint ***in_shape_data,
+                                 const int **in_shape_ndim,
+                                 const int ***in_shape_data,
                                  mx_uint *out_shape_size,
-                                 const mx_uint **out_shape_ndim,
-                                 const mx_uint ***out_shape_data,
+                                 const int **out_shape_ndim,
+                                 const int ***out_shape_data,
                                  mx_uint *aux_shape_size,
-                                 const mx_uint **aux_shape_ndim,
-                                 const mx_uint ***aux_shape_data,
+                                 const int **aux_shape_ndim,
+                                 const int ***aux_shape_data,
                                  int *complete);
 /*!
  * \brief partially infer shape of unknown input shapes given the known one.
@@ -1520,16 +1533,16 @@ MXNET_DLL int MXSymbolInferShapePartial(SymbolHandle sym,
                                         mx_uint num_args,
                                         const char** keys,
                                         const mx_uint *arg_ind_ptr,
-                                        const mx_uint *arg_shape_data,
+                                        const int *arg_shape_data,
                                         mx_uint *in_shape_size,
-                                        const mx_uint **in_shape_ndim,
-                                        const mx_uint ***in_shape_data,
+                                        const int **in_shape_ndim,
+                                        const int ***in_shape_data,
                                         mx_uint *out_shape_size,
-                                        const mx_uint **out_shape_ndim,
-                                        const mx_uint ***out_shape_data,
+                                        const int **out_shape_ndim,
+                                        const int ***out_shape_data,
                                         mx_uint *aux_shape_size,
-                                        const mx_uint **aux_shape_ndim,
-                                        const mx_uint ***aux_shape_data,
+                                        const int **aux_shape_ndim,
+                                        const int ***aux_shape_data,
                                         int *complete);

 /*!
@@ -1808,7 +1821,7 @@ MXNET_DLL int MXExecutorSimpleBind(SymbolHandle symbol_handle,
                                    const char** provided_grad_req_types,
                                    const mx_uint num_provided_arg_shapes,
                                    const char** provided_arg_shape_names,
-                                   const mx_uint* provided_arg_shape_data,
+                                   const int* provided_arg_shape_data,
                                    const mx_uint* provided_arg_shape_idx,
                                    const mx_uint num_provided_arg_dtypes,
                                    const char** provided_arg_dtype_names,
@@ -1862,7 +1875,7 @@ MXNET_DLL int MXExecutorReshape(int partial_shaping,
                                 const int* map_dev_ids,
                                 const mx_uint num_provided_arg_shapes,
                                 const char** provided_arg_shape_names,
-                                const mx_uint* provided_arg_shape_data,
+                                const int* provided_arg_shape_data,
                                 const mx_uint* provided_arg_shape_idx,
                                 mx_uint* num_in_args,
                                 NDArrayHandle** in_args,
@@ -2538,8 +2551,8 @@ MXNET_DLL int MXNDArrayGetSharedMemHandle(NDArrayHandle handle, int* shared_pid,
  * \param dtype data type of NDArray
  * \param out constructed NDArray
  */
-MXNET_DLL int MXNDArrayCreateFromSharedMem(int shared_pid, int shared_id, const mx_uint *shape,
-                                           mx_uint ndim, int dtype, NDArrayHandle *out);
+MXNET_DLL int MXNDArrayCreateFromSharedMem(int shared_pid, int shared_id, const int *shape,
+                                           int ndim, int dtype, NDArrayHandle *out);

 #ifdef __cplusplus
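These two new entry points expose the switch to every frontend. A minimal usage sketch, relying only on the documented signatures and return-code convention above (0 on success, -1 on failure):

// Sketch: enable numpy-compatible shape semantics, then restore the
// previous state when done.
#include <mxnet/c_api.h>
#include <cstdio>

int RunWithNumpyCompat() {
  int prev = 0;
  if (MXSetIsNumpyCompatible(1, &prev) != 0) return -1;

  bool curr = false;
  if (MXIsNumpyCompatible(&curr) != 0) return -1;
  std::printf("numpy compatibility: %s\n", curr ? "on" : "off");
  // ... work with scalar / zero-size tensors here ...

  int ignored = 0;
  return MXSetIsNumpyCompatible(prev, &ignored);
}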
16 changes: 16 additions & 0 deletions include/mxnet/imperative.h
@@ -97,6 +97,16 @@ class Imperative {
     is_recording_ = is_recording;
     return old;
   }
+  /*! \brief whether numpy compatibility is on. */
+  bool is_np_comp() const {
+    return is_np_comp_;
+  }
+  /*! \brief turn on or turn off numpy compatibility switch. */
+  bool set_is_np_comp(bool is_np_comp) {
+    bool old = is_np_comp_;
+    is_np_comp_ = is_np_comp;
+    return old;
+  }
   /*! \brief to record operator, return corresponding node. */
   void RecordOp(nnvm::NodeAttrs&& attrs,
                 const std::vector<NDArray*>& inputs,
@@ -165,9 +175,15 @@ class Imperative {
 #if DMLC_CXX11_THREAD_LOCAL
   static thread_local bool is_train_;
   static thread_local bool is_recording_;
+  // TODO(junwu): numpy compatibility switch kept for backward compatibility.
+  // Delete it in the next major release.
+  static thread_local bool is_np_comp_;
 #else
   static MX_THREAD_LOCAL bool is_train_;
   static MX_THREAD_LOCAL bool is_recording_;
+  // TODO(junwu): numpy compatibility switch kept for backward compatibility.
+  // Delete it in the next major release.
+  static MX_THREAD_LOCAL bool is_np_comp_;
 #endif
   /*! \brief node count used for naming */
   std::atomic<uint64_t> node_count_{0};
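Because the flag is thread-local and set_is_np_comp returns the previous value, a scoped guard falls out naturally. A hypothetical RAII sketch; the Imperative::Get() singleton accessor is assumed from the rest of the codebase rather than shown in this diff:

// Hypothetical sketch: flip the numpy-compatibility flag for one scope
// and restore the prior value on exit, mirroring how is_train_ and
// is_recording_ are typically toggled.
#include <mxnet/imperative.h>

class NumpyCompatGuard {
 public:
  explicit NumpyCompatGuard(bool on)
      : prev_(mxnet::Imperative::Get()->set_is_np_comp(on)) {}
  ~NumpyCompatGuard() {
    mxnet::Imperative::Get()->set_is_np_comp(prev_);
  }
 private:
  bool prev_;
};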
11 changes: 7 additions & 4 deletions include/mxnet/ndarray.h
@@ -859,12 +859,15 @@ class NDArray {
     Chunk(mxnet::TShape shape, Context ctx_, bool delay_alloc_, int dtype)
         : static_data(false), delay_alloc(true), ctx(ctx_),
           storage_ref_(Storage::_GetSharedRef()) {
-      auto size = shape.Size();
       storage_shape = shape;
+      if (shape_is_known(storage_shape)) {
+        shandle.size = shape.Size() * mshadow::mshadow_sizeof(dtype);
+      }
       var = Engine::Get()->NewVariable();
-      shandle.size = size * mshadow::mshadow_sizeof(dtype);
       shandle.ctx = ctx_;
-      if (!delay_alloc_) this->CheckAndAlloc();
+      if (!delay_alloc_) {
+        this->CheckAndAlloc();
+      }
     }

     Chunk(const TBlob &data, int dev_id)
@@ -953,7 +956,7 @@ class NDArray {
   /*! \brief set the shape for ith aux data, and update storage shape if necessary */
   inline void set_aux_shape(const size_t i, const mxnet::TShape& shape) {
     aux_shapes[i] = shape;
-    if (storage_shape.ndim() > 0) {
+    if (storage_shape.ndim() >= 0) {
       if (storage_type == kRowSparseStorage && i == rowsparse::kIdx) {
         storage_shape[0] = shape[0];
       } else if (storage_type == kCSRStorage && i == csr::kIdx) {
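Both hunks encode the new shape convention: an ndim of -1 means the rank itself is unknown (hence >= 0 rather than > 0), and storage can only be sized once the shape is fully known. For orientation, here is a sketch of what shape_is_known plausibly checks; the canonical definition lives in the shape headers, not in this diff:

// Sketch only (not the canonical definition): a shape is fully known
// when its rank is known and every dimension is known.
#include <mxnet/tuple.h>  // assumed header for mxnet::TShape at this revision

inline bool ShapeIsKnownSketch(const mxnet::TShape& shape) {
  if (shape.ndim() == -1) return false;   // unknown rank
  for (int i = 0; i < shape.ndim(); ++i) {
    if (shape[i] == -1) return false;     // unknown dimension
  }
  return true;
}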
3 changes: 2 additions & 1 deletion include/mxnet/tensor_blob.h
@@ -198,7 +198,6 @@ class TBlob {
         << "Expected: " << type_flag_ << " v.s. given " << mshadow::DataType<DType>::kFlag;
     return mshadow::Tensor<Device, 2, DType>(static_cast<DType*>(dptr_),
                                              shape_.FlatTo2D(),
-                                             shape_[shape_.ndim() - 1],
                                              stream);
   }
   /*!
@@ -419,6 +418,8 @@ class TBlob {
 namespace dmlc {
 // Add a few patches to support mxnet::TShape in dmlc/parameter.
 DMLC_DECLARE_TYPE_NAME(mxnet::TShape, "Shape(tuple)");
+DMLC_DECLARE_TYPE_NAME(mxnet::Tuple<int>, "Shape(tuple)");
+DMLC_DECLARE_TYPE_NAME(mxnet::Tuple<dmlc::optional<int>>, "Shape(tuple)");
 DMLC_DECLARE_TYPE_NAME(nnvm::Tuple<int>, "Shape(tuple)");
 DMLC_DECLARE_TYPE_NAME(nnvm::Tuple<dmlc::optional<int>>, "Shape(tuple)");
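The two added DMLC_DECLARE_TYPE_NAME entries let parameter structs declare mxnet::Tuple fields directly. A hypothetical parameter struct illustrating the pattern they enable (the struct and field names are ours, not from this PR):

// Hypothetical sketch: with the type names declared above, dmlc can
// parse and document Tuple-typed fields as "Shape(tuple)".
#include <dmlc/parameter.h>
#include <mxnet/tuple.h>

struct ExampleParam : public dmlc::Parameter<ExampleParam> {
  mxnet::Tuple<int> axes;
  DMLC_DECLARE_PARAMETER(ExampleParam) {
    DMLC_DECLARE_FIELD(axes)
        .set_default(mxnet::Tuple<int>())
        .describe("Axes this hypothetical operator acts on.");
  }
};

A real operator would pair this with DMLC_REGISTER_PARAMETER(ExampleParam) in its implementation file.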