From d96fcb6c58489f15d315fc125b9764a31e4f128a Mon Sep 17 00:00:00 2001
From: Sheng Zha
Date: Sat, 26 Aug 2017 17:03:40 -0700
Subject: [PATCH] fix tests

---
 tests/python/gpu/test_operator_gpu.py |  4 ++--
 tests/python/unittest/test_loss.py    | 18 ++++++------------
 2 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 11d146cae840..0c5771ebffb6 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -1346,11 +1346,11 @@ def test_sequence_reverse():
 
 
 def test_autograd_save_memory():
-    x = mx.nd.zeros((128, 1024, 1024), ctx=mx.gpu(0))
+    x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
     x.attach_grad()
 
     with mx.autograd.record():
-        for i in range(50):
+        for i in range(200):
             x = x + 1
             x.wait_to_read()
     x.backward()
diff --git a/tests/python/unittest/test_loss.py b/tests/python/unittest/test_loss.py
index b864215ca1d1..85875c604bf0 100644
--- a/tests/python/unittest/test_loss.py
+++ b/tests/python/unittest/test_loss.py
@@ -63,7 +63,6 @@ def get_net(num_hidden):
 
 
 def test_ce_loss():
-    mx.random.seed(1234)
     np.random.seed(1234)
     nclass = 10
     N = 20
@@ -83,7 +82,6 @@
 
 
 def test_bce_loss():
-    mx.random.seed(1234)
     np.random.seed(1234)
     N = 20
     data = mx.random.uniform(-1, 1, shape=(N, 20))
@@ -111,7 +109,6 @@ def test_bce_equal_ce2():
 
 
 def test_kl_loss():
-    mx.random.seed(1234)
     np.random.seed(1234)
     N = 20
     data = mx.random.uniform(-1, 1, shape=(N, 10))
@@ -129,12 +126,11 @@
 
 
 def test_l2_loss():
-    mx.random.seed(1234)
     np.random.seed(1234)
     N = 20
     data = mx.random.uniform(-1, 1, shape=(N, 10))
     label = mx.random.uniform(-1, 1, shape=(N, 1))
-    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label')
+    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label', shuffle=True)
     output = get_net(1)
     l = mx.symbol.Variable('label')
     Loss = gluon.loss.L2Loss()
@@ -142,26 +138,25 @@ def test_l2_loss():
     loss = Loss(output, l)
     loss = mx.sym.make_loss(loss)
     mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
-    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 1.},
-            eval_metric=mx.metric.Loss())
+    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.1, 'wd': 0.00045},
+            initializer=mx.init.Xavier(magnitude=2), eval_metric=mx.metric.Loss())
     assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.05
 
 
 def test_l1_loss():
-    mx.random.seed(1234)
     np.random.seed(1234)
     N = 20
     data = mx.random.uniform(-1, 1, shape=(N, 10))
     label = mx.random.uniform(-1, 1, shape=(N, 1))
-    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label')
+    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label', shuffle=True)
     output = get_net(1)
     l = mx.symbol.Variable('label')
     Loss = gluon.loss.L1Loss()
     loss = Loss(output, l)
     loss = mx.sym.make_loss(loss)
     mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
-    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.1},
-            initializer=mx.init.Uniform(0.5), eval_metric=mx.metric.Loss())
+    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.01},
+            initializer=mx.init.Xavier(magnitude=3), eval_metric=mx.metric.Loss())
     assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.1
 
 
@@ -196,7 +191,6 @@ def test_ctc_loss():
 
 
 def test_sample_weight_loss():
-    mx.random.seed(1234)
     np.random.seed(1234)
     nclass = 10
     N = 20