9 changes: 5 additions & 4 deletions benchmark/python/control_flow/rnn.py
@@ -37,8 +37,8 @@


 class ForeachRNN(gluon.HybridBlock):
-    def __init__(self, cell, length, prefix=None, params=None):
-        super(ForeachRNN, self).__init__(prefix=prefix, params=params)
+    def __init__(self, cell, length):
+        super(ForeachRNN, self).__init__()
         self.length = length
         self.cell = cell
@@ -48,8 +48,8 @@ def forward(self, inputs, states):


 class WhileRNN(gluon.HybridBlock):
-    def __init__(self, cell, length, prefix=None, params=None):
-        super(WhileRNN, self).__init__(prefix=prefix, params=params)
+    def __init__(self, cell, length):
+        super(WhileRNN, self).__init__()
         self.length = length
         self.cell = cell
@@ -90,6 +90,7 @@ def run_benchmark(cell_type, ctx, seq_len, batch_size, hidden_dim):

     for is_train, is_hyb_cell, is_hyb_layer in product([True, False], [False, True], [False, True]):
         cell = cell_type(hidden_dim)
+        cell.infer_shape(0, inputs, False)
         if is_hyb_cell:
             cell.hybridize(static_alloc=True)
         layer = obj(cell, seq_len)
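For context: Gluon 2.0 removed the prefix and params constructor arguments, parameters are now named by attribute path, and RNN cells defer parameter shapes until infer_shape or a first forward pass. A minimal sketch of the updated flow, with illustrative sizes and an LSTMCell standing in for cell_type:

import mxnet as mx
from mxnet import gluon

mx.npx.set_np()                                  # numpy semantics, as the benchmark assumes

seq_len, batch_size, hidden_dim = 100, 32, 512   # illustrative sizes
inputs = mx.np.random.uniform(size=(seq_len, batch_size, hidden_dim))

cell = gluon.rnn.LSTMCell(hidden_dim)            # Gluon 2.0: no prefix= / params=
cell.infer_shape(0, inputs, False)               # resolve deferred shapes, mirroring the diff
cell.hybridize(static_alloc=True)
cell.initialize()
states = cell.begin_state(batch_size=batch_size)
output, states = cell(inputs[0], states)         # one step; shape (batch_size, hidden_dim)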
16 changes: 8 additions & 8 deletions tests/nightly/estimator/test_estimator_cnn.py
@@ -21,7 +21,7 @@
 import sys

 import mxnet as mx
-import numpy as np
+import numpy as onp
 from mxnet import gluon, init, nd
 from mxnet.gluon import data
 from mxnet.gluon.contrib.estimator import estimator
@@ -60,11 +60,11 @@ def bilinear_kernel(in_channels, out_channels, kernel_size):
         center = factor - 1
     else:
         center = factor - 0.5
-    og = np.ogrid[:kernel_size, :kernel_size]
+    og = onp.ogrid[:kernel_size, :kernel_size]
     filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
-    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype='float32')
+    weight = onp.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype='float32')
     weight[range(in_channels), range(out_channels), :, :] = filt
-    return nd.array(weight)
+    return mx.np.array(weight)


 def get_net(model_name, context):
@@ -100,10 +100,10 @@ def test_estimator_cpu():
     context = mx.cpu()
     for model_name in models:
         net, input_shape, label_shape, loss_axis = get_net(model_name, context)
-        train_dataset = gluon.data.dataset.ArrayDataset(mx.nd.random.uniform(shape=input_shape),
-                                                        mx.nd.zeros(shape=label_shape))
-        val_dataset = gluon.data.dataset.ArrayDataset(mx.nd.random.uniform(shape=input_shape),
-                                                      mx.nd.zeros(shape=label_shape))
+        train_dataset = gluon.data.dataset.ArrayDataset(mx.np.random.uniform(size=input_shape),
+                                                        mx.np.zeros(shape=label_shape))
+        val_dataset = gluon.data.dataset.ArrayDataset(mx.np.random.uniform(size=input_shape),
+                                                      mx.np.zeros(shape=label_shape))
         loss = gluon.loss.SoftmaxCrossEntropyLoss(axis=loss_axis)
         train_data = gluon.data.DataLoader(train_dataset, batch_size=1)
         val_data = gluon.data.DataLoader(val_dataset, batch_size=1)
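Two conventions in this file recur throughout the PR: classic NumPy is imported as onp so that the name np cannot be confused with mxnet.np, and random samplers switch from the legacy shape= keyword to NumPy's size=. A small sketch of the mapping (values are illustrative):

import mxnet as mx
import numpy as onp                        # host-side NumPy, aliased to avoid clashing with mx.np

# legacy 1.x style:  mx.nd.random.uniform(shape=(2, 3))
x = mx.np.random.uniform(size=(2, 3))      # numpy-compatible sampler takes size=

w = onp.zeros((2, 3), dtype='float32')     # build the buffer on the host with classic NumPy
y = mx.np.array(w)                         # then copy it into an MXNet ndarray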
24 changes: 12 additions & 12 deletions tests/nightly/estimator/test_sentiment_rnn.py
@@ -55,8 +55,8 @@ def __init__(self, vocab, embed_size, kernel_sizes, num_channels,
     def forward(self, inputs):
         # Concatenate the output of two embedding layers with shape of
         # (batch size, number of words, word vector dimension) by word vector
-        embeddings = nd.concat(
-            self.embedding(inputs), self.constant_embedding(inputs), dim=2)
+        embeddings = mx.np.concatenate(
+            [self.embedding(inputs), self.constant_embedding(inputs)], axis=2)
         # According to the input format required by Conv1D, the word vector
         # dimension, that is, the channel dimension of the one-dimensional
         # convolutional layer, is transformed into the previous dimension
@@ -65,8 +65,8 @@ def forward(self, inputs):
         # pooling, an NDArray with the shape of (batch size, channel size, 1)
         # can be obtained. Use the flatten function to remove the last
         # dimension and then concatenate on the channel dimension
-        encoding = nd.concat(*[nd.flatten(
-            self.pool(conv(embeddings))) for conv in self.convs], dim=1)
+        encoding = mx.np.concatenate([mx.npx.batch_flatten(
+            self.pool(conv(embeddings))) for conv in self.convs], axis=1)
         # After applying the dropout method, use a fully connected layer to
         # obtain the output
         outputs = self.decoder(self.dropout(encoding))
@@ -95,7 +95,7 @@ def forward(self, inputs):
         # Concatenate the hidden states of the initial time step and final
         # time step to use as the input of the fully connected layer. Its
         # shape is (batch size, 4 * number of hidden units)
-        encoding = nd.concat(states[0], states[-1])
+        encoding = mx.np.concatenate([states[0], states[-1]], axis=1)
         outputs = self.decoder(encoding)
         return outputs

@@ -173,8 +173,8 @@ def pad(x):
         return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))

     tokenized_data = get_tokenized_imdb(data)
-    features = nd.array([pad(vocab.to_indices(x)) for x in tokenized_data])
-    labels = nd.array([score for _, score in data])
+    features = mx.np.array([pad(vocab.to_indices(x)) for x in tokenized_data])
+    labels = mx.np.array([score for _, score in data])
     return features, labels


@@ -213,16 +213,16 @@ def test_estimator_cpu():
     lr = 1
     num_epochs = 1

-    train_data = mx.nd.random.randint(low=0, high=100, shape=(2 * batch_size, 500))
-    train_label = mx.nd.random.randint(low=0, high=2, shape=(2 * batch_size,))
-    val_data = mx.nd.random.randint(low=0, high=100, shape=(batch_size, 500))
-    val_label = mx.nd.random.randint(low=0, high=2, shape=(batch_size,))
+    train_data = mx.np.random.randint(low=0, high=100, size=(2 * batch_size, 500))
+    train_label = mx.np.random.randint(low=0, high=2, size=(2 * batch_size,))
+    val_data = mx.np.random.randint(low=0, high=100, size=(batch_size, 500))
+    val_label = mx.np.random.randint(low=0, high=2, size=(batch_size,))

     train_dataloader = gluon.data.DataLoader(dataset=gluon.data.ArrayDataset(train_data, train_label),
                                              batch_size=batch_size, shuffle=True)
     val_dataloader = gluon.data.DataLoader(dataset=gluon.data.ArrayDataset(val_data, val_label),
                                            batch_size=batch_size)
-    vocab_list = mx.nd.zeros(shape=(100,))
+    vocab_list = mx.np.zeros(shape=(100,))

     # Get the model
     for model in models:
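The recurring rewrite in this file is nd.concat(a, b, dim=d) becoming mx.np.concatenate([a, b], axis=d): the numpy operator takes a sequence instead of varargs and axis instead of dim, and nd.flatten is replaced by mx.npx.batch_flatten, which collapses everything but the batch axis. A sketch with made-up shapes:

import mxnet as mx

a = mx.np.zeros((4, 10, 8))
b = mx.np.ones((4, 10, 8))

# legacy:  nd.concat(a, b, dim=2)
c = mx.np.concatenate([a, b], axis=2)   # shape (4, 10, 16)

# legacy:  nd.flatten(c)
f = mx.npx.batch_flatten(c)             # shape (4, 160), batch axis preserved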
20 changes: 10 additions & 10 deletions tests/nightly/model_backwards_compatibility_check/common.py
@@ -24,7 +24,6 @@
 import numpy as np
 import logging
 from mxnet import gluon
-import mxnet.ndarray as F
 from mxnet.gluon import nn
 import re
 from mxnet.test_utils import assert_almost_equal
@@ -57,11 +56,11 @@ def save_inference_results(inference_results, model_name):
     assert (isinstance(inference_results, mx.ndarray.ndarray.NDArray))
     save_path = os.path.join(get_model_path(model_name), ''.join([model_name, '-inference']))

-    mx.nd.save(save_path, {'inference': inference_results})
+    mx.npx.savez(save_path, **{'inference': inference_results})


 def load_inference_results(model_name):
-    inf_dict = mx.nd.load(model_name+'-inference')
+    inf_dict = mx.npx.load(model_name+'-inference')
     return inf_dict['inference']

@@ -70,7 +69,7 @@ def save_data_and_labels(test_data, test_labels, model_name):
     assert (isinstance(test_labels, mx.ndarray.ndarray.NDArray))

     save_path = os.path.join(get_model_path(model_name), ''.join([model_name, '-data']))
-    mx.nd.save(save_path, {'data': test_data, 'labels': test_labels})
+    mx.npx.savez(save_path, **{'data': test_data, 'labels': test_labels})


 def clean_model_files(files, model_name):
@@ -135,6 +134,7 @@ def create_model_folder(model_name):
         os.makedirs(path)


+@mx.util.use_np
 class Net(gluon.Block):
     def __init__(self, **kwargs):
         super(Net, self).__init__(**kwargs)
@@ -146,13 +146,13 @@ def __init__(self, **kwargs):
         self.fc2 = nn.Dense(2)

     def forward(self, x):
-        x = self.pool1(F.tanh(self.conv1(x)))
-        x = self.pool2(F.tanh(self.conv2(x)))
+        x = self.pool1(mx.np.tanh(self.conv1(x)))
+        x = self.pool2(mx.np.tanh(self.conv2(x)))
         # 0 means copy over size from corresponding dimension.
         # -1 means infer size from the rest of dimensions.
-        x = x.reshape((0, -1))
-        x = F.tanh(self.fc1(x))
-        x = F.tanh(self.fc2(x))
+        x = x.reshape(-1)
+        x = mx.np.tanh(self.fc1(x))
+        x = mx.np.tanh(self.fc2(x))
         return x


@@ -172,7 +172,7 @@ def forward(self, x):
         x = self.pool2(mx.np.tanh(self.conv2(x)))
         # 0 means copy over size from corresponding dimension.
         # -1 means infer size from the rest of dimensions.
-        x = x.reshape((0, -1))
+        x = x.reshape(-1)
         x = mx.np.tanh(self.fc1(x))
         x = mx.np.tanh(self.fc2(x))
         return x
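The @mx.util.use_np decorator opts the block into numpy semantics, so inputs and parameters flow through as mx.np ndarrays and the old free-function namespace (import mxnet.ndarray as F) is no longer needed; elementwise ops come from mx.np directly. A minimal sketch along the same lines, with an invented layer size:

import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn

@mx.util.use_np                              # numpy ndarrays inside this block
class TinyNet(gluon.Block):
    def __init__(self, **kwargs):
        super(TinyNet, self).__init__(**kwargs)
        self.fc = nn.Dense(2)

    def forward(self, x):
        return mx.np.tanh(self.fc(x))        # mx.np.tanh replaces the old F.tanh

net = TinyNet()
net.initialize()
out = net(mx.np.random.uniform(size=(4, 8)))  # shape (4, 2)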
tests/nightly/model_backwards_compatibility_check/model_backwards_compat_inference.py
@@ -30,13 +30,13 @@ def test_lenet_gluon_load_params_api():
            logging.warn('No training files found for %s for MXNet version : %s' % (model_name, folder))
            continue

-        data = mx.nd.load(''.join([model_name, '-data']))
+        data = mx.npx.load(''.join([model_name, '-data']))
         test_data = data['data']
         # Load the model and perform inference
         loaded_model = Net()
         loaded_model.load_params(model_name + '-params')
         output = loaded_model(test_data)
-        old_inference_results = mx.nd.load(model_name + '-inference')['inference']
+        old_inference_results = mx.npx.load(model_name + '-inference')['inference']
         assert_almost_equal(old_inference_results.asnumpy(), output.asnumpy(), rtol=rtol_default, atol=atol_default)
         clean_model_files(model_files, model_name)
         logging.info('=================================')
@@ -54,12 +54,12 @@ def test_lenet_gluon_hybrid_imports_api():
            logging.warn('No training files found for %s for MXNet version : %s' % (model_name, folder))
            continue
         # Load the model and perform inference
-        data = mx.np.load(''.join([model_name, '-data']))
+        data = mx.npx.load(''.join([model_name, '-data']))
         test_data = data['data']
         loaded_model = HybridNet()
         loaded_model = gluon.SymbolBlock.imports(model_name + '-symbol.json', ['data'], model_name + '-0000.params')
         output = loaded_model(test_data)
-        old_inference_results = mx.np.load(model_name + '-inference')['inference']
+        old_inference_results = mx.npx.load(model_name + '-inference')['inference']
         assert_almost_equal(old_inference_results.asnumpy(), output.asnumpy(), rtol=rtol_default, atol=atol_default)
         clean_model_files(model_files, model_name)
         logging.info('=================================')
@@ -85,13 +85,13 @@ def test_lstm_gluon_load_parameters_api():
            logging.warn('No training files found for %s for MXNet version : %s' % (model_name, folder))
            continue

-        data = mx.nd.load(''.join([model_name, '-data']))
+        data = mx.npx.load(''.join([model_name, '-data']))
         test_data = data['data']
         # Load the model and perform inference
         loaded_model = SimpleLSTMModel()
         loaded_model.load_parameters(model_name + '-params')
         output = loaded_model(test_data)
-        old_inference_results = mx.nd.load(model_name + '-inference')['inference']
+        old_inference_results = mx.npx.load(model_name + '-inference')['inference']
         assert_almost_equal(old_inference_results.asnumpy(), output.asnumpy(), rtol=rtol_default, atol=atol_default)
         clean_model_files(model_files, model_name)
         logging.info('=================================')
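The load side mirrors the save side in common.py: the legacy pair mx.nd.save / mx.nd.load on a dict becomes mx.npx.savez with keyword arguments and mx.npx.load, which returns a dict keyed by those argument names. A round-trip sketch (the file name is illustrative):

import mxnet as mx

arr = mx.np.arange(6).reshape(2, 3)

# legacy:  mx.nd.save('demo-data', {'data': arr})
mx.npx.savez('demo-data', data=arr)          # each keyword becomes a named entry

# legacy:  mx.nd.load('demo-data')
loaded = mx.npx.load('demo-data')            # {'data': ndarray}
assert (loaded['data'].asnumpy() == arr.asnumpy()).all()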
tests/nightly/model_backwards_compatibility_check/model_backwards_compat_train.py
@@ -29,13 +29,13 @@ def train_lenet_gluon_save_params_api():
     net.initialize(weights, ctx=[mx.cpu(0)])
     # Prepare data

-    test_data = mx.nd.array(np.random.uniform(-1, 1, size=(20, 1, 30, 30)))
+    test_data = mx.np.random.uniform(-1, 1, size=(20, 1, 30, 30))
     output = net(test_data)
     # print (y)

-    mx.nd.save(os.path.join(get_model_path(model_name), ''.join([model_name, '-data'])), {'data': test_data})
+    mx.npx.savez(os.path.join(get_model_path(model_name), ''.join([model_name, '-data'])), **{'data': test_data})
     save_inference_results(output, model_name)
-    net.save_params(os.path.join(get_model_path(model_name), ''.join([model_name, '-params'])))
+    net.save(os.path.join(get_model_path(model_name), ''.join([model_name, '-params'])))


 @mx.util.use_np
@@ -55,7 +55,7 @@ def train_lenet_gluon_hybrid_export_api():
     # Save the inference output ys
     # Save the model params

-    mx.npx.savez(os.path.join(get_model_path(model_name), ''.join([model_name, '-data'])), {'data': test_data})
+    mx.npx.savez(os.path.join(get_model_path(model_name), ''.join([model_name, '-data'])), **{'data': test_data})
     save_inference_results(output, model_name)
     if compare_versions(str(mxnet_version) , '1.1.0') < 0:
         # v1.0.0 does not have the epoch param in the .exports API. Hence adding this safety net
@@ -81,10 +81,10 @@ def train_lstm_gluon_save_parameters_api():
     weights = mx.initializer.Xavier(magnitude=2.57)
     net.initialize(weights, ctx=[mx.cpu(0)])

-    test_data = mx.nd.array(np.random.uniform(-1, 1, size=(10, 30)))
+    test_data = mx.np.array(np.random.uniform(-1, 1, size=(10, 30)))
     output = net(test_data)
     # print output
-    mx.nd.save(os.path.join(get_model_path(model_name), ''.join([model_name, '-data'])), {'data': test_data})
+    mx.npx.savez(os.path.join(get_model_path(model_name), ''.join([model_name, '-data'])), **{'data': test_data})
     save_inference_results(output, model_name)
     net.save_parameters(os.path.join(get_model_path(model_name), ''.join([model_name, '-params'])))
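On the training side, data that used to be created as mx.nd.array(np.random.uniform(...)) is now either sampled directly with mx.np.random or wrapped from a host NumPy buffer with mx.np.array, and weights are written with save_parameters (or Block.save for the whole block, as in the LeNet case above). A sketch of both data paths, with illustrative shapes:

import mxnet as mx
import numpy as np

mx.npx.set_np()        # global numpy semantics; the tests use @mx.util.use_np per class instead

# sample on the MXNet side ...
a = mx.np.random.uniform(-1, 1, size=(20, 1, 30, 30))

# ... or wrap an existing host-side NumPy array
b = mx.np.array(np.random.uniform(-1, 1, size=(10, 30)))

net = mx.gluon.nn.Dense(2)
net.initialize()
net(b)                                   # run once so parameter shapes are materialized
net.save_parameters('demo-params')       # restore later with net.load_parameters('demo-params')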