This repository was archived by the owner on Nov 17, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 6.7k
Fix dist kvstore for trainer and flaky dist kvstore test #11633
Merged
Merged
Changes from all commits
Commits
Show all changes
12 commits
Select commit
Hold shift + click to select a range
54eeaaf
fix dist kvstore trainer
eric-haibin-lin 15be79c
fix test setup
eric-haibin-lin 7a22ea3
enable tests on CI
eric-haibin-lin adc933d
update move some test to cpu
eric-haibin-lin 9dda024
don't use nvidia-docker
eric-haibin-lin 64cd900
rename option
eric-haibin-lin 103e6e1
trigger test
eric-haibin-lin b735964
Merge branch 'master' into fix-dist-kv
eric-haibin-lin ece12ea
reduce workload to avoid timeout
eric-haibin-lin ce8a3e9
disable operator tuning to reduce launch overhead
eric-haibin-lin c6e1f3b
merge
eric-haibin-lin 4649dcc
update test types
eric-haibin-lin File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -375,7 +375,7 @@ def check_invalid_pull(): | |
| check_invalid_gluon_trainer_reset() | ||
| check_invalid_pull() | ||
|
|
||
| def test_gluon_trainer(): | ||
| def test_gluon_trainer_type(): | ||
| def check_trainer_kv_type(stype, grad_stype, update_on_kv): | ||
| params = mx.gluon.ParameterDict() | ||
| x = params.get('x', shape=(10,1), lr_mult=1.0, stype=stype, grad_stype=grad_stype) | ||
|
|
@@ -388,28 +388,67 @@ def check_trainer_kv_type(stype, grad_stype, update_on_kv): | |
| check_trainer_kv_type('default', 'default', False) | ||
| check_trainer_kv_type('default', 'row_sparse', True) | ||
| check_trainer_kv_type('row_sparse', 'row_sparse', True) | ||
| print('worker ' + str(my_rank) + ' passed test_gluon_trainer') | ||
|
|
||
| print('worker ' + str(my_rank) + ' passed test_gluon_trainer_type') | ||
|
|
||
| def test_gluon_trainer_step(): | ||
| def check_trainer_step(): | ||
| ctx = mx.cpu(0) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. is this on purpose? We run our KVstore tests on a GPU instance. If we don't require GPU, please downgrade to using a CPU instance. |
||
| shape = (10, 1) | ||
| x = mx.gluon.Parameter('x', shape=shape) | ||
| x.initialize(ctx=ctx, init='ones') | ||
| trainer = mx.gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'multi_precision': False}, kvstore=kv) | ||
| with mx.autograd.record(): | ||
| w = x.data(ctx) | ||
| y = (my_rank + 1) * w | ||
| y.backward() | ||
| trainer.step(1) | ||
| expected = 1 - (1 + nworker) * nworker / 2 | ||
| assert_almost_equal(x.data(ctx).asnumpy(), np.full(shape, expected)) | ||
| check_trainer_step() | ||
| print('worker ' + str(my_rank) + ' passed test_gluon_trainer_step') | ||
|
|
||
| def test_gluon_trainer_sparse_step(): | ||
| def check_trainer_sparse_step(): | ||
| ctx = mx.cpu(0) | ||
| shape = (2, 10) | ||
| all_rows = mx.nd.arange(0, shape[0], ctx=ctx) | ||
| x = mx.gluon.Parameter('x', shape=shape, stype='row_sparse', grad_stype='row_sparse') | ||
| x.initialize(ctx=ctx, init='ones') | ||
| trainer = mx.gluon.Trainer([x], 'sgd', {'learning_rate': 1.0}, kvstore=kv) | ||
| with mx.autograd.record(): | ||
| w = x.row_sparse_data(all_rows) | ||
| y = (my_rank + 1) * w | ||
| y.backward() | ||
| trainer.step(1) | ||
| expected = 1 - (1 + nworker) * nworker / 2 | ||
| assert_almost_equal(x.row_sparse_data(all_rows).asnumpy(), np.full(shape, expected)) | ||
| check_trainer_sparse_step() | ||
| print('worker ' + str(my_rank) + ' passed test_gluon_trainer_sparse_step') | ||
|
|
||
| if __name__ == "__main__": | ||
| parser = argparse.ArgumentParser(description='test distributed kvstore in dist_sync mode') | ||
| parser.add_argument('--nrepeat', type=int, default=7) | ||
| parser.add_argument('--type', type=str, default='all') | ||
| parser.add_argument('--type', type=str, default='default_cpu') | ||
| parser.add_argument('--no-gpu', dest='gpu', action='store_false') | ||
| parser.add_argument('--no-multiprecision', dest='multiprecision', action='store_false') | ||
| opt = parser.parse_args() | ||
| if opt.type == 'gluon': | ||
| test_gluon_trainer() | ||
| if opt.type == 'invalid': | ||
| if opt.type == 'gluon_type_cpu': | ||
| test_gluon_trainer_type() | ||
| elif opt.type == 'gluon_step_cpu': | ||
| test_gluon_trainer_step() | ||
| elif opt.type == 'gluon_sparse_step_cpu': | ||
| test_gluon_trainer_sparse_step() | ||
| elif opt.type == 'invalid_cpu': | ||
| test_invalid_operations() | ||
| if opt.type == 'all' or opt.type == 'init': | ||
| elif opt.type == 'init_gpu': | ||
| test_sync_init(opt.gpu) | ||
| if opt.type == 'all' or opt.type == 'default': | ||
| elif opt.type == 'default_cpu': | ||
| kv = init_kv() | ||
| kv = set_optimizer(use_multiprecision=opt.multiprecision) | ||
| test_sync_push_pull(opt.nrepeat) | ||
| # dont run non compressed tests after this as kvstore compression will be set here | ||
| if opt.type == 'all' or opt.type == 'compressed': | ||
| kv = init_kv() | ||
| elif opt.type == 'compressed_cpu': | ||
| kv, threshold = init_kv_compressed(kv) | ||
| kv = set_optimizer(use_multiprecision=opt.multiprecision) | ||
| test_sync_2bit_compression(threshold, opt.nrepeat) | ||
| else: | ||
| raise RuntimeError("Unknown test type") | ||
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
awesome!