From 3c3b7fb40f8a2184887ed0f462ab6124f4613534 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 22 Dec 2020 12:56:39 +0800 Subject: [PATCH 01/55] bug fixing: not displacing input doc on github --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 778277ffe9..2ce48317fd 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The typical procedure of using DeePMD-kit includes 5 steps A quick-start on using DeePMD-kit can be found [here](doc/use-deepmd-kit.md). -A full [document](doc/train-input.rst) on options in the training input script is available. +A full [document](doc/train-input-auto.rst) on options in the training input script is available. # Troubleshooting From 45d70baf85378a7c18777aab7b54d761b3317d22 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 22 Dec 2020 12:56:39 +0800 Subject: [PATCH 02/55] bug fixing: not displaying input doc on github --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 778277ffe9..2ce48317fd 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The typical procedure of using DeePMD-kit includes 5 steps A quick-start on using DeePMD-kit can be found [here](doc/use-deepmd-kit.md). -A full [document](doc/train-input.rst) on options in the training input script is available. +A full [document](doc/train-input-auto.rst) on options in the training input script is available. 
# Troubleshooting From b83d4c8fb37c6a5353db10493fb88bf98ed419e6 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Fri, 1 Jan 2021 17:10:53 +0800 Subject: [PATCH 03/55] add doc for short-range tabulated interaction --- doc/train-input-auto.rst | 38 +++++++++++++++++++++++++++++++++++++- source/train/argcheck.py | 9 +++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/doc/train-input-auto.rst b/doc/train-input-auto.rst index e29dd5799b..c7dbe1a9e0 100644 --- a/doc/train-input-auto.rst +++ b/doc/train-input-auto.rst @@ -23,6 +23,42 @@ model: The model determines the normalization from the statistics of the data. This key specifies the number of `frames` in each `system` used for statistics. + .. raw:: html + + + use_srtab: + | type: ``str``, optional + | argument path: ``model/use_srtab`` + + The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly. + + .. raw:: html + + + smin_alpha: + | type: ``float``, optional + | argument path: ``model/smin_alpha`` + + The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided. + + .. raw:: html + + + sw_rmin: + | type: ``float``, optional + | argument path: ``model/sw_rmin`` + + The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided. + + .. raw:: html + + + sw_rmax: + | type: ``float``, optional + | argument path: ``model/sw_rmax`` + + The upper boundary of the interpolation between short-range tabulated interaction and DP. 
It is only required when `use_srtab` is provided. + .. raw:: html @@ -962,7 +998,7 @@ training: numb_test: - | type: ``int``, optional, default: ``1`` + | type: ``int`` | ``list`` | ``str``, optional, default: ``1`` | argument path: ``training/numb_test`` Number of frames used for the test during training. diff --git a/source/train/argcheck.py b/source/train/argcheck.py index d87d41857c..032d71ad17 100644 --- a/source/train/argcheck.py +++ b/source/train/argcheck.py @@ -223,9 +223,18 @@ def model_args (): doc_data_stat_nbatch = 'The model determines the normalization from the statistics of the data. This key specifies the number of `frames` in each `system` used for statistics.' doc_descrpt = 'The descriptor of atomic environment.' doc_fitting = 'The fitting of physical properties.' + doc_use_srtab = 'The table for the short-range pairwise interaction added on top of DP. The table is a text data file with (N_t + 1) * N_t / 2 + 1 columes. The first colume is the distance between atoms. The second to the last columes are energies for pairs of certain types. For example we have two atom types, 0 and 1. The columes from 2nd to 4th are for 0-0, 0-1 and 1-1 correspondingly.' + doc_smin_alpha = 'The short-range tabulated interaction will be swithed according to the distance of the nearest neighbor. This distance is calculated by softmin. This parameter is the decaying parameter in the softmin. It is only required when `use_srtab` is provided.' + doc_sw_rmin = 'The lower boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided.' + doc_sw_rmax = 'The upper boundary of the interpolation between short-range tabulated interaction and DP. It is only required when `use_srtab` is provided.' 
+ ca = Argument("model", dict, [Argument("type_map", list, optional = True, doc = doc_type_map), Argument("data_stat_nbatch", int, optional = True, default = 10, doc = doc_data_stat_nbatch), + Argument("use_srtab", str, optional = True, doc = doc_use_srtab), + Argument("smin_alpha", float, optional = True, doc = doc_smin_alpha), + Argument("sw_rmin", float, optional = True, doc = doc_sw_rmin), + Argument("sw_rmax", float, optional = True, doc = doc_sw_rmax), Argument("descriptor", dict, [], [descrpt_variant_type_args()], doc = doc_descrpt), Argument("fitting_net", dict, [], [fitting_variant_type_args()], doc = doc_fitting) ]) From 468b5fe25635dcc1f0e5f5fbe76eb344efbf8267 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 12 Jan 2021 10:07:38 +0800 Subject: [PATCH 04/55] fix bug of compulsory key `loss` --- source/train/argcheck.py | 1 + 1 file changed, 1 insertion(+) diff --git a/source/train/argcheck.py b/source/train/argcheck.py index 032d71ad17..c358c114ff 100644 --- a/source/train/argcheck.py +++ b/source/train/argcheck.py @@ -299,6 +299,7 @@ def loss_args(): doc_loss = 'The definition of loss function. The type of the loss depends on the type of the fitting. For fitting type `ener`, the prefactors before energy, force, virial and atomic energy losses may be provided. For fitting type `dipole`, `polar` and `global_polar`, the loss may be an empty `dict` or unset.' 
ca = Argument('loss', dict, [], [loss_variant_type_args()], + optional = True, doc = doc_loss) return ca From e6a3ce6be198338f96af6e0142921e9ced9d7e22 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 12 Jan 2021 10:39:02 +0800 Subject: [PATCH 05/55] fix bug of compulsory lable requirement --- source/train/Loss.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/source/train/Loss.py b/source/train/Loss.py index 1f336325a3..68fd8dd660 100644 --- a/source/train/Loss.py +++ b/source/train/Loss.py @@ -38,11 +38,11 @@ def __init__ (self, jdata, **kwarg) : self.has_ae = (self.start_pref_ae != 0 or self.limit_pref_ae != 0) self.has_pf = (self.start_pref_pf != 0 or self.limit_pref_pf != 0) # data required - add_data_requirement('energy', 1, atomic=False, must=self.has_e, high_prec=True) - add_data_requirement('force', 3, atomic=True, must=self.has_f, high_prec=False) - add_data_requirement('virial', 9, atomic=False, must=self.has_v, high_prec=False) - add_data_requirement('atom_ener', 1, atomic=True, must=self.has_ae, high_prec=False) - add_data_requirement('atom_pref', 1, atomic=True, must=self.has_pf, high_prec=False, repeat=3) + add_data_requirement('energy', 1, atomic=False, must=False, high_prec=True) + add_data_requirement('force', 3, atomic=True, must=False, high_prec=False) + add_data_requirement('virial', 9, atomic=False, must=False, high_prec=False) + add_data_requirement('atom_ener', 1, atomic=True, must=False, high_prec=False) + add_data_requirement('atom_pref', 1, atomic=True, must=False, high_prec=False, repeat=3) def build (self, learning_rate, From 2b21c22367a6ebf9417d2fadb0b7425eac820c26 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 12 Jan 2021 11:41:03 +0800 Subject: [PATCH 06/55] add notice for the consistency of the tf version --- doc/install.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/install.md b/doc/install.md index 5e00d3275a..05a0d3bdb0 100644 --- a/doc/install.md +++ b/doc/install.md 
@@ -63,7 +63,8 @@ source $tensorflow_venv/bin/activate pip install --upgrade pip pip install --upgrade tensorflow==2.3.0 ``` -It is notice that everytime a new shell is started and one wants to use `DeePMD-kit`, the virtual environment should be activated by +It is highly recommanded to keep the consistency of the TensorFlow version for the python and C++ interfaces. +Everytime a new shell is started and one wants to use `DeePMD-kit`, the virtual environment should be activated by ```bash source $tensorflow_venv/bin/activate ``` From fe3a7ec477960342725bc87a82cfdf66c521fde9 Mon Sep 17 00:00:00 2001 From: denghuilu Date: Tue, 26 Jan 2021 12:46:00 +0800 Subject: [PATCH 07/55] fix bug of nbor sorting When the number of sel is smaller than the lammps nbors, the program may have a gpu sorting error. --- source/op/cuda/descrpt_se_a.cu | 95 +------------------------- source/op/cuda/descrpt_se_r.cu | 95 +------------------------- source/op/descrpt_se_a_multi_device.cc | 8 +-- source/op/descrpt_se_r_multi_device.cc | 8 +-- 4 files changed, 8 insertions(+), 198 deletions(-) diff --git a/source/op/cuda/descrpt_se_a.cu b/source/op/cuda/descrpt_se_a.cu index 5965254111..222c24ff49 100644 --- a/source/op/cuda/descrpt_se_a.cu +++ b/source/op/cuda/descrpt_se_a.cu @@ -228,73 +228,6 @@ __global__ void compute_descriptor_se_a (FPTYPE* descript, } } -template -void format_nbor_list_256 ( - const FPTYPE* coord, - const int* type, - const int* jrange, - const int* jlist, - const int& nloc, - const float& rcut_r, - int * i_idx, - int_64 * key -) -{ - const int LEN = 256; - const int MAGIC_NUMBER = 256; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; - dim3 block_grid(nloc, nblock); - dim3 thread_grid(1, LEN); - format_nlist_fill_a_se_a - <<>> ( - coord, - type, - jrange, - jlist, - rcut_r, - key, - i_idx, - MAGIC_NUMBER - ); - const int ITEMS_PER_THREAD = 4; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; - // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + 
nloc * MAGIC_NUMBER); -} - -template -void format_nbor_list_512 ( - const FPTYPE* coord, - const int* type, - const int* jrange, - const int* jlist, - const int& nloc, - const float& rcut_r, - int * i_idx, - int_64 * key -) -{ - const int LEN = 256; - const int MAGIC_NUMBER = 512; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; - dim3 block_grid(nloc, nblock); - dim3 thread_grid(1, LEN); - format_nlist_fill_a_se_a - <<>> ( - coord, - type, - jrange, - jlist, - rcut_r, - key, - i_idx, - MAGIC_NUMBER - ); - const int ITEMS_PER_THREAD = 4; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; - // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); -} template void format_nbor_list_1024 ( @@ -419,29 +352,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const // cudaProfilerStart(); get_i_idx_se_a<<>> (nloc, ilist, i_idx); - if (nnei <= 256) { - format_nbor_list_256 ( - coord, - type, - jrange, - jlist, - nloc, - rcut_r, - i_idx, - key - ); - } else if (nnei <= 512) { - format_nbor_list_512 ( - coord, - type, - jrange, - jlist, - nloc, - rcut_r, - i_idx, - key - ); - } else if (nnei <= 1024) { + if (MAGIC_NUMBER <= 1024) { format_nbor_list_1024 ( coord, type, @@ -452,7 +363,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (nnei <= 2048) { + } else if (MAGIC_NUMBER <= 2048) { format_nbor_list_2048 ( coord, type, @@ -463,7 +374,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (nnei <= 4096) { + } else if (MAGIC_NUMBER <= 4096) { format_nbor_list_4096 ( coord, type, diff --git a/source/op/cuda/descrpt_se_r.cu b/source/op/cuda/descrpt_se_r.cu index a65ba5887a..17e76daae2 100644 --- a/source/op/cuda/descrpt_se_r.cu +++ b/source/op/cuda/descrpt_se_r.cu @@ -210,73 +210,6 @@ __global__ void compute_descriptor_se_r (FPTYPE* descript, } } -template -void format_nbor_list_256 ( - const FPTYPE* coord, - const 
int* type, - const int* jrange, - const int* jlist, - const int& nloc, - const float& rcut_r, - int * i_idx, - int_64 * key -) -{ - const int LEN = 256; - const int MAGIC_NUMBER = 256; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; - dim3 block_grid(nloc, nblock); - dim3 thread_grid(1, LEN); - format_nlist_fill_a_se_r - <<>> ( - coord, - type, - jrange, - jlist, - rcut_r, - key, - i_idx, - MAGIC_NUMBER - ); - const int ITEMS_PER_THREAD = 4; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; - // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); -} - -template -void format_nbor_list_512 ( - const FPTYPE* coord, - const int* type, - const int* jrange, - const int* jlist, - const int& nloc, - const float& rcut_r, - int * i_idx, - int_64 * key -) -{ - const int LEN = 256; - const int MAGIC_NUMBER = 512; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; - dim3 block_grid(nloc, nblock); - dim3 thread_grid(1, LEN); - format_nlist_fill_a_se_r - <<>> ( - coord, - type, - jrange, - jlist, - rcut_r, - key, - i_idx, - MAGIC_NUMBER - ); - const int ITEMS_PER_THREAD = 4; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; - // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); -} template void format_nbor_list_1024 ( @@ -401,29 +334,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const // cudaProfilerStart(); get_i_idx_se_r<<>> (nloc, ilist, i_idx); - if (nnei <= 256) { - format_nbor_list_256 ( - coord, - type, - jrange, - jlist, - nloc, - rcut_r, - i_idx, - key - ); - } else if (nnei <= 512) { - format_nbor_list_512 ( - coord, - type, - jrange, - jlist, - nloc, - rcut_r, - i_idx, - key - ); - } else if (nnei <= 1024) { + if (MAGIC_NUMBER <= 1024) { format_nbor_list_1024 ( coord, type, @@ -434,7 +345,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (nnei <= 2048) { + } else if (MAGIC_NUMBER <= 2048) { 
format_nbor_list_2048 ( coord, type, @@ -445,7 +356,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (nnei <= 4096) { + } else if (MAGIC_NUMBER <= 4096) { format_nbor_list_4096 ( coord, type, diff --git a/source/op/descrpt_se_a_multi_device.cc b/source/op/descrpt_se_a_multi_device.cc index ae5e623171..914e611e2d 100644 --- a/source/op/descrpt_se_a_multi_device.cc +++ b/source/op/descrpt_se_a_multi_device.cc @@ -271,13 +271,7 @@ class DescrptSeAOp : public OpKernel { } int get_magic_number(int const nnei) { - if (nnei <= 256) { - return 256; - } - else if (nnei <= 512) { - return 512; - } - else if (nnei <= 1024) { + if (nnei <= 1024) { return 1024; } else if (nnei <= 2048) { diff --git a/source/op/descrpt_se_r_multi_device.cc b/source/op/descrpt_se_r_multi_device.cc index c5eaff616c..07a9618270 100644 --- a/source/op/descrpt_se_r_multi_device.cc +++ b/source/op/descrpt_se_r_multi_device.cc @@ -261,13 +261,7 @@ class DescrptSeROp : public OpKernel { } int get_magic_number(int const nnei) { - if (nnei <= 256) { - return 256; - } - else if (nnei <= 512) { - return 512; - } - else if (nnei <= 1024) { + if (nnei <= 1024) { return 1024; } else if (nnei <= 2048) { From 3c9dfc49c43e09f90bf3c955710dd64cc9ce1144 Mon Sep 17 00:00:00 2001 From: denghuilu Date: Tue, 26 Jan 2021 17:07:05 +0800 Subject: [PATCH 08/55] use lammps max_nbor_size as the upper boundary of gpu sorting --- source/lib/include/CustomeOperation.h | 12 +++--- source/op/cuda/descrpt_se_a.cu | 54 +++++++++++++------------- source/op/cuda/descrpt_se_r.cu | 54 +++++++++++++------------- source/op/descrpt_se_a_multi_device.cc | 34 +++++++--------- source/op/descrpt_se_r_multi_device.cc | 35 +++++++---------- 5 files changed, 88 insertions(+), 101 deletions(-) diff --git a/source/lib/include/CustomeOperation.h b/source/lib/include/CustomeOperation.h index c446db8130..f7fd7c2496 100644 --- a/source/lib/include/CustomeOperation.h +++ 
b/source/lib/include/CustomeOperation.h @@ -169,7 +169,7 @@ void compute_descriptor_se_a_cpu ( } template -void DescrptSeACPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { +void DescrptSeACPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { // set & normalize coord std::vector d_coord3(nall * 3); for (int ii = 0; ii < nall; ++ii) { @@ -235,8 +235,8 @@ void DescrptSeACPULauncher(const FPTYPE * coord, const int * type, const int * i #if GOOGLE_CUDA template -void DescrptSeAGPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { - DescrptSeAGPUExecuteFunctor()(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, magic_number); +void DescrptSeAGPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const 
int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { + DescrptSeAGPUExecuteFunctor()(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, max_nbor_size); } #endif // GOOGLE_CUDA // ****************************************************************************** @@ -432,7 +432,7 @@ void compute_descriptor_se_r_cpu ( } template -void DescrptSeRCPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { +void DescrptSeRCPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { // set & normalize coord std::vector d_coord3(nall * 3); for (int ii = 0; ii < nall; ++ii) { @@ -498,8 +498,8 @@ void DescrptSeRCPULauncher(const FPTYPE * coord, const int * type, const int * i #if GOOGLE_CUDA template -void DescrptSeRGPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * 
jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { - DescrptSeRGPUExecuteFunctor()(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, magic_number); +void DescrptSeRGPULauncher(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { + DescrptSeRGPUExecuteFunctor()(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, max_nbor_size); } #endif // GOOGLE_CUDA // ****************************************************************************** diff --git a/source/op/cuda/descrpt_se_a.cu b/source/op/cuda/descrpt_se_a.cu index 222c24ff49..a528c4c477 100644 --- a/source/op/cuda/descrpt_se_a.cu +++ b/source/op/cuda/descrpt_se_a.cu @@ -84,9 +84,9 @@ __global__ void format_nlist_fill_a_se_a(const FPTYPE * coord, const float rcut, int_64 * key, int * i_idx, - const int MAGIC_NUMBER) + const int MAX_NBOR_SIZE) { - // <<>> + // <<>> const unsigned int idx = blockIdx.x; const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; @@ -98,7 +98,7 @@ __global__ void format_nlist_fill_a_se_a(const FPTYPE * coord, const int * nei_idx = jlist + 
jrange[i_idx[idx]]; // dev_copy(nei_idx, &jlist[jrange[i_idx]], nsize); - int_64 * key_in = key + idx * MAGIC_NUMBER; + int_64 * key_in = key + idx * MAX_NBOR_SIZE; FPTYPE diff[3]; const int & j_idx = nei_idx[idy]; @@ -121,7 +121,7 @@ __global__ void format_nlist_fill_b_se_a(int * nlist, const int * sec_a, const int sec_a_size, int * nei_iter_dev, - const int MAGIC_NUMBER) + const int MAX_NBOR_SIZE) { const unsigned int idy = blockIdx.x * blockDim.x + threadIdx.x; @@ -132,13 +132,13 @@ __global__ void format_nlist_fill_b_se_a(int * nlist, int * row_nlist = nlist + idy * nlist_size; int * nei_iter = nei_iter_dev + idy * sec_a_size; - int_64 * key_out = key + nloc * MAGIC_NUMBER + idy * MAGIC_NUMBER; + int_64 * key_out = key + nloc * MAX_NBOR_SIZE + idy * MAX_NBOR_SIZE; for (int ii = 0; ii < sec_a_size; ii++) { nei_iter[ii] = sec_a[ii]; } - for (unsigned int kk = 0; key_out[kk] != key_out[MAGIC_NUMBER - 1]; kk++) { + for (unsigned int kk = 0; key_out[kk] != key_out[MAX_NBOR_SIZE - 1]; kk++) { const int & nei_type = key_out[kk] / 1E15; if (nei_iter[nei_type] < sec_a[nei_type + 1]) { row_nlist[nei_iter[nei_type]++] = key_out[kk] % 100000; @@ -242,8 +242,8 @@ void format_nbor_list_1024 ( ) { const int LEN = 256; - const int MAGIC_NUMBER = 1024; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; + const int MAX_NBOR_SIZE = 1024; + const int nblock = (MAX_NBOR_SIZE + LEN - 1) / LEN; dim3 block_grid(nloc, nblock); dim3 thread_grid(1, LEN); format_nlist_fill_a_se_a @@ -255,12 +255,12 @@ void format_nbor_list_1024 ( rcut_r, key, i_idx, - MAGIC_NUMBER + MAX_NBOR_SIZE ); const int ITEMS_PER_THREAD = 8; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; + const int BLOCK_THREADS = MAX_NBOR_SIZE / ITEMS_PER_THREAD; // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); + BlockSortKernel <<>> (key, key + nloc * MAX_NBOR_SIZE); } template @@ -276,8 +276,8 @@ void format_nbor_list_2048 ( ) { const int LEN = 256; - const int MAGIC_NUMBER = 
2048; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; + const int MAX_NBOR_SIZE = 2048; + const int nblock = (MAX_NBOR_SIZE + LEN - 1) / LEN; dim3 block_grid(nloc, nblock); dim3 thread_grid(1, LEN); format_nlist_fill_a_se_a @@ -289,12 +289,12 @@ void format_nbor_list_2048 ( rcut_r, key, i_idx, - MAGIC_NUMBER + MAX_NBOR_SIZE ); const int ITEMS_PER_THREAD = 8; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; + const int BLOCK_THREADS = MAX_NBOR_SIZE / ITEMS_PER_THREAD; // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); + BlockSortKernel <<>> (key, key + nloc * MAX_NBOR_SIZE); } template @@ -310,8 +310,8 @@ void format_nbor_list_4096 ( ) { const int LEN = 256; - const int MAGIC_NUMBER = 4096; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; + const int MAX_NBOR_SIZE = 4096; + const int nblock = (MAX_NBOR_SIZE + LEN - 1) / LEN; dim3 block_grid(nloc, nblock); dim3 thread_grid(1, LEN); format_nlist_fill_a_se_a @@ -323,16 +323,16 @@ void format_nbor_list_4096 ( rcut_r, key, i_idx, - MAGIC_NUMBER + MAX_NBOR_SIZE ); const int ITEMS_PER_THREAD = 16; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; + const int BLOCK_THREADS = MAX_NBOR_SIZE / ITEMS_PER_THREAD; // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); + BlockSortKernel <<>> (key, key + nloc * MAX_NBOR_SIZE); } template -void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descript, FPTYPE * descript_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int MAGIC_NUMBER) { +void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const int * type, const int * ilist, const int * 
jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descript, FPTYPE * descript_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { const int LEN = 256; int nblock = (nloc + LEN -1) / LEN; int * sec_a_dev = array_int; @@ -342,7 +342,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const cudaError_t res = cudaSuccess; res = cudaMemcpy(sec_a_dev, &sec_a[0], sizeof(int) * sec_a.size(), cudaMemcpyHostToDevice); cudaErrcheck(res); - res = cudaMemset(key, 0xffffffff, sizeof(int_64) * nloc * MAGIC_NUMBER); cudaErrcheck(res); + res = cudaMemset(key, 0xffffffff, sizeof(int_64) * nloc * max_nbor_size); cudaErrcheck(res); res = cudaMemset(nlist, -1, sizeof(int) * nloc * nnei); cudaErrcheck(res); res = cudaMemset(descript, 0.0, sizeof(FPTYPE) * nloc * ndescrpt); cudaErrcheck(res); res = cudaMemset(descript_deriv, 0.0, sizeof(FPTYPE) * nloc * ndescrpt * 3); cudaErrcheck(res); @@ -352,7 +352,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const // cudaProfilerStart(); get_i_idx_se_a<<>> (nloc, ilist, i_idx); - if (MAGIC_NUMBER <= 1024) { + if (max_nbor_size <= 1024) { format_nbor_list_1024 ( coord, type, @@ -363,7 +363,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (MAGIC_NUMBER <= 2048) { + } else if (max_nbor_size <= 2048) { format_nbor_list_2048 ( coord, type, @@ -374,7 +374,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (MAGIC_NUMBER <= 4096) { + } else if (max_nbor_size <= 4096) { format_nbor_list_4096 ( coord, type, @@ -397,7 +397,7 @@ void DescrptSeAGPUExecuteFunctor::operator()(const FPTYPE * coord, const sec_a_dev, sec_a.size(), nei_iter, - MAGIC_NUMBER + max_nbor_size 
); } diff --git a/source/op/cuda/descrpt_se_r.cu b/source/op/cuda/descrpt_se_r.cu index 17e76daae2..0715f19c5e 100644 --- a/source/op/cuda/descrpt_se_r.cu +++ b/source/op/cuda/descrpt_se_r.cu @@ -84,9 +84,9 @@ __global__ void format_nlist_fill_a_se_r(const FPTYPE * coord, const float rcut, int_64 * key, int * i_idx, - const int MAGIC_NUMBER) + const int MAX_NBOR_SIZE) { - // <<>> + // <<>> const unsigned int idx = blockIdx.x; const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; @@ -98,7 +98,7 @@ __global__ void format_nlist_fill_a_se_r(const FPTYPE * coord, const int * nei_idx = jlist + jrange[i_idx[idx]]; // dev_copy(nei_idx, &jlist[jrange[i_idx]], nsize); - int_64 * key_in = key + idx * MAGIC_NUMBER; + int_64 * key_in = key + idx * MAX_NBOR_SIZE; FPTYPE diff[3]; const int & j_idx = nei_idx[idy]; @@ -121,7 +121,7 @@ __global__ void format_nlist_fill_b_se_r(int * nlist, const int * sec_a, const int sec_a_size, int * nei_iter_dev, - const int MAGIC_NUMBER) + const int MAX_NBOR_SIZE) { const unsigned int idy = blockIdx.x * blockDim.x + threadIdx.x; @@ -132,13 +132,13 @@ __global__ void format_nlist_fill_b_se_r(int * nlist, int * row_nlist = nlist + idy * nlist_size; int * nei_iter = nei_iter_dev + idy * sec_a_size; - int_64 * key_out = key + nloc * MAGIC_NUMBER + idy * MAGIC_NUMBER; + int_64 * key_out = key + nloc * MAX_NBOR_SIZE + idy * MAX_NBOR_SIZE; for (int ii = 0; ii < sec_a_size; ii++) { nei_iter[ii] = sec_a[ii]; } - for (unsigned int kk = 0; key_out[kk] != key_out[MAGIC_NUMBER - 1]; kk++) { + for (unsigned int kk = 0; key_out[kk] != key_out[MAX_NBOR_SIZE - 1]; kk++) { const int & nei_type = key_out[kk] / 1E15; if (nei_iter[nei_type] < sec_a[nei_type + 1]) { row_nlist[nei_iter[nei_type]++] = key_out[kk] % 100000; @@ -224,8 +224,8 @@ void format_nbor_list_1024 ( ) { const int LEN = 256; - const int MAGIC_NUMBER = 1024; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; + const int MAX_NBOR_SIZE = 1024; + const int nblock = (MAX_NBOR_SIZE + LEN - 1) 
/ LEN; dim3 block_grid(nloc, nblock); dim3 thread_grid(1, LEN); format_nlist_fill_a_se_r @@ -237,12 +237,12 @@ void format_nbor_list_1024 ( rcut_r, key, i_idx, - MAGIC_NUMBER + MAX_NBOR_SIZE ); const int ITEMS_PER_THREAD = 8; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; + const int BLOCK_THREADS = MAX_NBOR_SIZE / ITEMS_PER_THREAD; // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); + BlockSortKernel <<>> (key, key + nloc * MAX_NBOR_SIZE); } template @@ -258,8 +258,8 @@ void format_nbor_list_2048 ( ) { const int LEN = 256; - const int MAGIC_NUMBER = 2048; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; + const int MAX_NBOR_SIZE = 2048; + const int nblock = (MAX_NBOR_SIZE + LEN - 1) / LEN; dim3 block_grid(nloc, nblock); dim3 thread_grid(1, LEN); format_nlist_fill_a_se_r @@ -271,12 +271,12 @@ void format_nbor_list_2048 ( rcut_r, key, i_idx, - MAGIC_NUMBER + MAX_NBOR_SIZE ); const int ITEMS_PER_THREAD = 8; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; + const int BLOCK_THREADS = MAX_NBOR_SIZE / ITEMS_PER_THREAD; // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); + BlockSortKernel <<>> (key, key + nloc * MAX_NBOR_SIZE); } template @@ -292,8 +292,8 @@ void format_nbor_list_4096 ( ) { const int LEN = 256; - const int MAGIC_NUMBER = 4096; - const int nblock = (MAGIC_NUMBER + LEN - 1) / LEN; + const int MAX_NBOR_SIZE = 4096; + const int nblock = (MAX_NBOR_SIZE + LEN - 1) / LEN; dim3 block_grid(nloc, nblock); dim3 thread_grid(1, LEN); format_nlist_fill_a_se_r @@ -305,16 +305,16 @@ void format_nbor_list_4096 ( rcut_r, key, i_idx, - MAGIC_NUMBER + MAX_NBOR_SIZE ); const int ITEMS_PER_THREAD = 16; - const int BLOCK_THREADS = MAGIC_NUMBER / ITEMS_PER_THREAD; + const int BLOCK_THREADS = MAX_NBOR_SIZE / ITEMS_PER_THREAD; // BlockSortKernel<<>> ( - BlockSortKernel <<>> (key, key + nloc * MAGIC_NUMBER); + BlockSortKernel <<>> (key, key + nloc * MAX_NBOR_SIZE); } template -void 
DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descript, FPTYPE * descript_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int MAGIC_NUMBER) { +void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const int * type, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descript, FPTYPE * descript_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { const int LEN = 256; int nblock = (nloc + LEN -1) / LEN; int * sec_a_dev = array_int; @@ -324,7 +324,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const cudaError_t res = cudaSuccess; res = cudaMemcpy(sec_a_dev, &sec_a[0], sizeof(int) * sec_a.size(), cudaMemcpyHostToDevice); cudaErrcheck(res); - res = cudaMemset(key, 0xffffffff, sizeof(int_64) * nloc * MAGIC_NUMBER); cudaErrcheck(res); + res = cudaMemset(key, 0xffffffff, sizeof(int_64) * nloc * max_nbor_size); cudaErrcheck(res); res = cudaMemset(nlist, -1, sizeof(int) * nloc * nnei); cudaErrcheck(res); res = cudaMemset(descript, 0.0, sizeof(FPTYPE) * nloc * ndescrpt); cudaErrcheck(res); res = cudaMemset(descript_deriv, 0.0, sizeof(FPTYPE) * nloc * ndescrpt * 3); cudaErrcheck(res); @@ -334,7 +334,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const // cudaProfilerStart(); get_i_idx_se_r<<>> (nloc, ilist, i_idx); - if (MAGIC_NUMBER <= 1024) { + if (max_nbor_size <= 1024) { format_nbor_list_1024 ( 
coord, type, @@ -345,7 +345,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (MAGIC_NUMBER <= 2048) { + } else if (max_nbor_size <= 2048) { format_nbor_list_2048 ( coord, type, @@ -356,7 +356,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const i_idx, key ); - } else if (MAGIC_NUMBER <= 4096) { + } else if (max_nbor_size <= 4096) { format_nbor_list_4096 ( coord, type, @@ -379,7 +379,7 @@ void DescrptSeRGPUExecuteFunctor::operator()(const FPTYPE * coord, const sec_a_dev, sec_a.size(), nei_iter, - MAGIC_NUMBER + max_nbor_size ); } diff --git a/source/op/descrpt_se_a_multi_device.cc b/source/op/descrpt_se_a_multi_device.cc index 914e611e2d..62676c56a3 100644 --- a/source/op/descrpt_se_a_multi_device.cc +++ b/source/op/descrpt_se_a_multi_device.cc @@ -34,13 +34,13 @@ struct DeviceFunctor { template struct DescrptSeAFunctor { - void operator()(const CPUDevice& d, const FPTYPE * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { - DescrptSeACPULauncher(coord, type, ilist, jrange, jlist, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ntypes, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, magic_number); + void operator()(const CPUDevice& d, const FPTYPE * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, 
const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { + DescrptSeACPULauncher(coord, type, ilist, jrange, jlist, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ntypes, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, max_nbor_size); } #if GOOGLE_CUDA - void operator()(const GPUDevice& d, const FPTYPE * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { - DescrptSeAGPULauncher(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, magic_number); + void operator()(const GPUDevice& d, const FPTYPE * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const FPTYPE * avg, const FPTYPE * std, FPTYPE * descrpt, FPTYPE * descrpt_deriv, FPTYPE * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { + DescrptSeAGPULauncher(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, max_nbor_size); } #endif // GOOGLE_CUDA }; @@ -66,7 +66,7 @@ class DescrptSeAOp : public OpKernel { nnei_r = sec_r.back(); nnei = nnei_a + nnei_r; fill_nei_a = 
(rcut_a < 0); - magic_number = get_magic_number(nnei); + max_nbor_size = 1024; } void Compute(OpKernelContext* context) override { @@ -117,7 +117,6 @@ class DescrptSeAOp : public OpKernel { OP_REQUIRES (context, (ntypes == int(sel_a.size())), errors::InvalidArgument ("number of types should match the length of sel array")); OP_REQUIRES (context, (ntypes == int(sel_r.size())), errors::InvalidArgument ("number of types should match the length of sel array")); - OP_REQUIRES (context, (nnei <= 4096), errors::InvalidArgument ("Assert failed, max neighbor size of atom(nnei) " + std::to_string(nnei) + " is larger than 4096, which currently is not supported by deepmd-kit.")); // Create output tensors TensorShape descrpt_shape ; @@ -159,13 +158,14 @@ class DescrptSeAOp : public OpKernel { OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, int_shape, &int_temp)); Tensor uint64_temp; TensorShape uint64_shape; - uint64_shape.AddDim(nloc * magic_number * 2); + uint64_shape.AddDim(nloc * max_nbor_size * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_UINT64, uint64_shape, &uint64_temp)); array_int = int_temp.flat().data(); array_longlong = uint64_temp.flat().data(); nbor_update(mesh_tensor.flat().data(), static_cast(mesh_tensor.NumElements())); + OP_REQUIRES (context, (max_nbor_size <= 4096), errors::InvalidArgument ("Assert failed, max neighbor size of atom(lammps) " + std::to_string(max_nbor_size) + " is larger than 4096, which currently is not supported by deepmd-kit.")); } else if (device == "CPU") { memcpy (&ilist, 4 + mesh_tensor.flat().data(), sizeof(int *)); @@ -198,7 +198,7 @@ class DescrptSeAOp : public OpKernel { rcut_r_smth, sec_a, fill_nei_a, - magic_number + max_nbor_size ); } @@ -212,7 +212,7 @@ class DescrptSeAOp : public OpKernel { std::vector sec_a; std::vector sec_r; int ndescrpt, ndescrpt_a, ndescrpt_r; - int nnei, nnei_a, nnei_r, nloc, nall, magic_number; + int nnei, nnei_a, nnei_r, nloc, nall, max_nbor_size; bool fill_nei_a; //private func @@ 
-266,21 +266,15 @@ class DescrptSeAOp : public OpKernel { cudaErrcheck(cudaMemcpy(ilist, ilist_host, sizeof(int) * mesh_host[1], cudaMemcpyHostToDevice)); cudaErrcheck(cudaMemcpy(jrange, jrange_host, sizeof(int) * mesh_host[2], cudaMemcpyHostToDevice)); cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); + + max_nbor_size = 1024; + for(int ii = 0; ii < mesh_host[2] - 1; ii++) { + max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? (jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; + } } delete [] mesh_host; } - int get_magic_number(int const nnei) { - if (nnei <= 1024) { - return 1024; - } - else if (nnei <= 2048) { - return 2048; - } - else if (nnei <= 4096) { - return 4096; - } - } }; // Register the CPU kernels. diff --git a/source/op/descrpt_se_r_multi_device.cc b/source/op/descrpt_se_r_multi_device.cc index 07a9618270..ba2ec377b7 100644 --- a/source/op/descrpt_se_r_multi_device.cc +++ b/source/op/descrpt_se_r_multi_device.cc @@ -31,13 +31,13 @@ struct DeviceFunctor { template struct DescrptSeRFunctor { - void operator()(const CPUDevice& d, const T * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const T * avg, const T * std, T * descrpt, T * descrpt_deriv, T * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { - DescrptSeRCPULauncher(coord, type, ilist, jrange, jlist, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ntypes, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, magic_number); + void operator()(const CPUDevice& d, const T * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const T * 
avg, const T * std, T * descrpt, T * descrpt_deriv, T * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { + DescrptSeRCPULauncher(coord, type, ilist, jrange, jlist, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ntypes, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, max_nbor_size); } #if GOOGLE_CUDA - void operator()(const GPUDevice& d, const T * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const T * avg, const T * std, T * descrpt, T * descrpt_deriv, T * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int magic_number) { - DescrptSeRGPULauncher(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, magic_number); + void operator()(const GPUDevice& d, const T * coord, const int * type, const int * mesh, const int * ilist, const int * jrange, const int * jlist, int * array_int, unsigned long long * array_longlong, const T * avg, const T * std, T * descrpt, T * descrpt_deriv, T * rij, int * nlist, const int nloc, const int nall, const int nnei, const int ntypes, const int ndescrpt, const float rcut_r, const float rcut_r_smth, const std::vector sec_a, const bool fill_nei_a, const int max_nbor_size) { + DescrptSeRGPULauncher(coord, type, ilist, jrange, jlist, array_int, array_longlong, avg, std, descrpt, descrpt_deriv, rij, nlist, nloc, nall, nnei, ndescrpt, rcut_r, rcut_r_smth, sec_a, fill_nei_a, max_nbor_size); } #endif // GOOGLE_CUDA }; @@ -55,9 +55,7 @@ class DescrptSeROp : public OpKernel { 
ndescrpt = sec.back() * 1; nnei = sec.back(); fill_nei_a = true; - magic_number = get_magic_number(nnei); - // count_nei_idx_overflow = 0; - // std::cout << "I'm in descrpt_se_r_gpu.cc" << std::endl; + max_nbor_size = 1024; } void Compute(OpKernelContext* context) override { @@ -149,13 +147,14 @@ class DescrptSeROp : public OpKernel { OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, int_shape, &int_temp)); Tensor uint64_temp; TensorShape uint64_shape; - uint64_shape.AddDim(nloc * magic_number * 2); + uint64_shape.AddDim(nloc * max_nbor_size * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_UINT64, uint64_shape, &uint64_temp)); array_int = int_temp.flat().data(); array_longlong = uint64_temp.flat().data(); nbor_update(mesh_tensor.flat().data(), static_cast(mesh_tensor.NumElements())); + OP_REQUIRES (context, (max_nbor_size <= 4096), errors::InvalidArgument ("Assert failed, max neighbor size of atom(lammps) " + std::to_string(max_nbor_size) + " is larger than 4096, which currently is not supported by deepmd-kit.")); } else if (device == "CPU") { memcpy (&ilist, 4 + mesh_tensor.flat().data(), sizeof(int *)); @@ -188,7 +187,7 @@ class DescrptSeROp : public OpKernel { rcut_smth, sec, fill_nei_a, - magic_number + max_nbor_size ); } @@ -213,7 +212,7 @@ class DescrptSeROp : public OpKernel { } } - int magic_number; + int max_nbor_size; std::string device; int *array_int; unsigned long long*array_longlong; @@ -256,21 +255,15 @@ class DescrptSeROp : public OpKernel { cudaErrcheck(cudaMemcpy(ilist, ilist_host, sizeof(int) * mesh_host[1], cudaMemcpyHostToDevice)); cudaErrcheck(cudaMemcpy(jrange, jrange_host, sizeof(int) * mesh_host[2], cudaMemcpyHostToDevice)); cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); + + max_nbor_size = 1024; + for(int ii = 0; ii < mesh_host[2] - 1; ii++) { + max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? 
(jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; + } } delete [] mesh_host; } - int get_magic_number(int const nnei) { - if (nnei <= 1024) { - return 1024; - } - else if (nnei <= 2048) { - return 2048; - } - else if (nnei <= 4096) { - return 4096; - } - } }; // Register the CPU kernels. From 51de5ec5eb1d269616be34c0802e7ddc26f601c4 Mon Sep 17 00:00:00 2001 From: denghuilu Date: Wed, 27 Jan 2021 11:37:59 +0800 Subject: [PATCH 09/55] fix a potential bug --- source/op/descrpt_se_a_multi_device.cc | 2 +- source/op/descrpt_se_r_multi_device.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/op/descrpt_se_a_multi_device.cc b/source/op/descrpt_se_a_multi_device.cc index 62676c56a3..93e2cdccac 100644 --- a/source/op/descrpt_se_a_multi_device.cc +++ b/source/op/descrpt_se_a_multi_device.cc @@ -268,7 +268,7 @@ class DescrptSeAOp : public OpKernel { cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); max_nbor_size = 1024; - for(int ii = 0; ii < mesh_host[2] - 1; ii++) { + for(int ii = 0; ii < mesh_host[2]; ii++) { max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? (jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; } } diff --git a/source/op/descrpt_se_r_multi_device.cc b/source/op/descrpt_se_r_multi_device.cc index ba2ec377b7..b94f97d6e1 100644 --- a/source/op/descrpt_se_r_multi_device.cc +++ b/source/op/descrpt_se_r_multi_device.cc @@ -257,7 +257,7 @@ class DescrptSeROp : public OpKernel { cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); max_nbor_size = 1024; - for(int ii = 0; ii < mesh_host[2] - 1; ii++) { + for(int ii = 0; ii < mesh_host[2]; ii++) { max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? 
(jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; } } From a24971f699cc71d577c19f0bf590b9b87194a97a Mon Sep 17 00:00:00 2001 From: hsulab Date: Fri, 29 Jan 2021 22:49:07 +0000 Subject: [PATCH 10/55] fix an error in stress by ase interface --- source/train/calculator.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/source/train/calculator.py b/source/train/calculator.py index de439c3f78..d6d956572c 100644 --- a/source/train/calculator.py +++ b/source/train/calculator.py @@ -24,13 +24,15 @@ ``` """ -from ase.calculators.calculator import Calculator, all_changes +from ase.calculators.calculator import ( + Calculator, all_changes, PropertyNotImplementedError +) import deepmd.DeepPot as DeepPot class DP(Calculator): name = "DP" - implemented_properties = ["energy", "forces", "stress"] + implemented_properties = ["energy", "forces", "virial", "stress"] def __init__(self, model, label="DP", type_dict=None, **kwargs): Calculator.__init__(self, label=label, **kwargs) @@ -40,7 +42,7 @@ def __init__(self, model, label="DP", type_dict=None, **kwargs): else: self.type_dict = dict(zip(self.dp.get_type_map(), range(self.dp.get_ntypes()))) - def calculate(self, atoms=None, properties=["energy", "forces", "stress"], system_changes=all_changes): + def calculate(self, atoms=None, properties=["energy", "forces", "virial"], system_changes=all_changes): coord = atoms.get_positions().reshape([1, -1]) if sum(atoms.get_pbc())>0: cell = atoms.get_cell().reshape([1, -1]) @@ -49,7 +51,17 @@ def calculate(self, atoms=None, properties=["energy", "forces", "stress"], syste symbols = atoms.get_chemical_symbols() atype = [self.type_dict[k] for k in symbols] e, f, v = self.dp.eval(coords=coord, cells=cell, atom_types=atype) - self.results['energy'] = e[0] + self.results['energy'] = e[0][0] self.results['forces'] = f[0] - self.results['stress'] = v[0] + self.results['virial'] = v[0].reshape(3,3) + # convert virial into stress for lattice relaxation 
+ if "stress" in properties: + if sum(atoms.get_pbc()) > 0: + # the usual convention (tensile stress is positive) + # stress = -virial / volume + stress = -0.5*(v[0].copy()+v[0].copy().T) / atoms.get_volume() + # Voigt notation + self.results['stress'] = stress.flat[[0,4,8,5,2,1]] + else: + raise PropertyNotImplementedError From fd660595a1a24f70c90c15bbb0964121befb668e Mon Sep 17 00:00:00 2001 From: denghuilu Date: Thu, 4 Feb 2021 11:05:32 +0800 Subject: [PATCH 11/55] update compiler support for Ampere architecture devices --- source/op/cuda/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/op/cuda/CMakeLists.txt b/source/op/cuda/CMakeLists.txt index 89dd0b5922..0201e50130 100644 --- a/source/op/cuda/CMakeLists.txt +++ b/source/op/cuda/CMakeLists.txt @@ -28,6 +28,8 @@ if (${CUDA_VERSION_MAJOR} GREATER "10") -gencode arch=compute_61,code=sm_61; # Pascal - GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030, Titan Xp, Tesla P40, Tesla P4, Discrete GPU on the NVIDIA Drive PX2 -gencode arch=compute_70,code=sm_70; # Volta - GV100/Tesla V100, GTX 1180 (GV104) -gencode arch=compute_75,code=sm_75; # Turing - RTX 2080, Titan RTX, Quadro R8000 + -gencode arch=compute_80,code=sm_80; # Anpere - A100 + -gencode arch=compute_86,code=sm_86; # Anpere - RTX 3090 -O3; -Xcompiler -fPIC; ) elseif (${CUDA_VERSION_MAJOR} STREQUAL "10") From 90938c47f74349b53a1bdf02048f9ecca81671fd Mon Sep 17 00:00:00 2001 From: denghuilu Date: Mon, 1 Mar 2021 22:49:42 +0800 Subject: [PATCH 12/55] fix bug of cuda-11 compilation --- source/op/cuda/CMakeLists.txt | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/source/op/cuda/CMakeLists.txt b/source/op/cuda/CMakeLists.txt index 0201e50130..8eab370b1d 100644 --- a/source/op/cuda/CMakeLists.txt +++ b/source/op/cuda/CMakeLists.txt @@ -19,7 +19,17 @@ include_directories(cub) message(STATUS "CUDA major version is " ${CUDA_VERSION_MAJOR}) -if (${CUDA_VERSION_MAJOR} GREATER "10") +if 
(${CUDA_VERSION_MAJOR} GREATER "11") + # nvcc flags + set(CUDA_NVCC_FLAGS -gencode arch=compute_60,code=sm_60; # Pascal – GP100/Tesla P100 – DGX-1 (Generic Pascal) + -gencode arch=compute_61,code=sm_61; # Pascal - GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030, Titan Xp, Tesla P40, Tesla P4, Discrete GPU on the NVIDIA Drive PX2 + -gencode arch=compute_70,code=sm_70; # Volta - GV100/Tesla V100, GTX 1180 (GV104) + -gencode arch=compute_75,code=sm_75; # Turing - RTX 2080, Titan RTX, Quadro R8000 + -gencode arch=compute_80,code=sm_80; # Anpere - A100 + -gencode arch=compute_86,code=sm_86; # Anpere - RTX 3090 + -O3; -Xcompiler -fPIC; + ) +elseif (${CUDA_VERSION_MAJOR} STREQUAL "11" AND ${CUDA_VERSION_MINOR} GREATER "0") # nvcc flags set(CUDA_NVCC_FLAGS -gencode arch=compute_50,code=sm_50; -gencode arch=compute_52,code=sm_52; # Tesla M40, Tesla M40, Quadro M6000... @@ -32,6 +42,18 @@ if (${CUDA_VERSION_MAJOR} GREATER "10") -gencode arch=compute_86,code=sm_86; # Anpere - RTX 3090 -O3; -Xcompiler -fPIC; ) +elseif (${CUDA_VERSION_MAJOR} STREQUAL "11" AND ${CUDA_VERSION_MINOR} STREQUAL "0") + # nvcc flags + set(CUDA_NVCC_FLAGS -gencode arch=compute_50,code=sm_50; + -gencode arch=compute_52,code=sm_52; # Tesla M40, Tesla M40, Quadro M6000... 
+ -gencode arch=compute_53,code=sm_53; + -gencode arch=compute_60,code=sm_60; # Pascal – GP100/Tesla P100 – DGX-1 (Generic Pascal) + -gencode arch=compute_61,code=sm_61; # Pascal - GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030, Titan Xp, Tesla P40, Tesla P4, Discrete GPU on the NVIDIA Drive PX2 + -gencode arch=compute_70,code=sm_70; # Volta - GV100/Tesla V100, GTX 1180 (GV104) + -gencode arch=compute_75,code=sm_75; # Turing - RTX 2080, Titan RTX, Quadro R8000 + -gencode arch=compute_80,code=sm_80; # Anpere - A100 + -O3; -Xcompiler -fPIC; + ) elseif (${CUDA_VERSION_MAJOR} STREQUAL "10") set(CUDA_NVCC_FLAGS -gencode arch=compute_30,code=sm_30; # Tesla K10, Quadro K600 K420 K410, -gencode arch=compute_35,code=sm_35; # Tesla K20 K40, TITAN Z Black, GTX 780Ti 780 From 6cbeb2079e1b505d9f02d545eeda3950f42f6e13 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 2 Mar 2021 21:06:54 +0800 Subject: [PATCH 13/55] print message to std:cerr and return rather than assertion. The assertions will be bypassed in release building mode --- source/lib/src/NNPInter.cc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/source/lib/src/NNPInter.cc b/source/lib/src/NNPInter.cc index 8bedd88c9c..ec2b6b87da 100644 --- a/source/lib/src/NNPInter.cc +++ b/source/lib/src/NNPInter.cc @@ -190,7 +190,10 @@ void NNPInter:: init (const string & model, const int & gpu_rank) { - assert (!inited); + if (inited){ + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do nothing at the second call of initializer" << std::endl; + return ; + } SessionOptions options; options.config.set_inter_op_parallelism_threads(num_inter_nthreads); options.config.set_intra_op_parallelism_threads(num_intra_nthreads); @@ -497,7 +500,10 @@ void NNPInterModelDevi:: init (const vector & models, const int & gpu_rank) { - assert (!inited); + if (inited){ + std::cerr << "WARNING: deepmd-kit should not be initialized twice, do nothing at the second call of initializer" << std::endl; + 
return ; + } numb_models = models.size(); sessions.resize(numb_models); graph_defs.resize(numb_models); From 5597ea2b49f96e99a52a9779b04b6c12e5a79a04 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 2 Mar 2021 21:33:46 +0800 Subject: [PATCH 14/55] set `restartinfo` to 0. Restarting the pair_style `deepmd` from restart file is not supported --- source/lmp/pair_nnp.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/source/lmp/pair_nnp.cpp b/source/lmp/pair_nnp.cpp index e8cff008f5..b5a64e8b11 100644 --- a/source/lmp/pair_nnp.cpp +++ b/source/lmp/pair_nnp.cpp @@ -206,6 +206,7 @@ PairNNP::PairNNP(LAMMPS *lmp) if (strcmp(update->unit_style,"metal") != 0) { error->all(FLERR,"Pair deepmd requires metal unit, please set it by \"units metal\""); } + restartinfo = 0; pppmflag = 1; respa_enable = 0; writedata = 0; From 7328be08982fbbbe12725ee2121a1899f206f2ce Mon Sep 17 00:00:00 2001 From: denghuilu Date: Wed, 3 Mar 2021 13:54:24 +0800 Subject: [PATCH 15/55] fix bug of max_nbor_size usage --- source/op/descrpt_se_a_multi_device.cc | 19 +++++++++++++++---- source/op/descrpt_se_r_multi_device.cc | 18 +++++++++++++++--- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/source/op/descrpt_se_a_multi_device.cc b/source/op/descrpt_se_a_multi_device.cc index 93e2cdccac..141b2d89bc 100644 --- a/source/op/descrpt_se_a_multi_device.cc +++ b/source/op/descrpt_se_a_multi_device.cc @@ -21,6 +21,8 @@ REGISTER_OP("DescrptSeA") .Output("nlist: int32"); // only sel_a and rcut_r uesd. 
+#define GPU_MAX_NBOR_SIZE 4096 + struct DeviceFunctor { void operator()(const CPUDevice& d, std::string& device) { device = "CPU"; @@ -158,14 +160,14 @@ class DescrptSeAOp : public OpKernel { OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, int_shape, &int_temp)); Tensor uint64_temp; TensorShape uint64_shape; - uint64_shape.AddDim(nloc * max_nbor_size * 2); + uint64_shape.AddDim(nloc * GPU_MAX_NBOR_SIZE * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_UINT64, uint64_shape, &uint64_temp)); array_int = int_temp.flat().data(); array_longlong = uint64_temp.flat().data(); nbor_update(mesh_tensor.flat().data(), static_cast(mesh_tensor.NumElements())); - OP_REQUIRES (context, (max_nbor_size <= 4096), errors::InvalidArgument ("Assert failed, max neighbor size of atom(lammps) " + std::to_string(max_nbor_size) + " is larger than 4096, which currently is not supported by deepmd-kit.")); + OP_REQUIRES (context, (max_nbor_size <= GPU_MAX_NBOR_SIZE), errors::InvalidArgument ("Assert failed, max neighbor size of atom(lammps) " + std::to_string(max_nbor_size) + " is larger than 4096, which currently is not supported by deepmd-kit.")); } else if (device == "CPU") { memcpy (&ilist, 4 + mesh_tensor.flat().data(), sizeof(int *)); @@ -267,14 +269,23 @@ class DescrptSeAOp : public OpKernel { cudaErrcheck(cudaMemcpy(jrange, jrange_host, sizeof(int) * mesh_host[2], cudaMemcpyHostToDevice)); cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); - max_nbor_size = 1024; + max_nbor_size = 0; for(int ii = 0; ii < mesh_host[2]; ii++) { max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? (jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; } + assert(max_nbor_size <= GPU_MAX_NBOR_SIZE); + if (max_nbor_size <= 1024) { + max_nbor_size = 1024; + } + else if (max_nbor_size <= 2048) { + max_nbor_size = 2048; + } + else { + max_nbor_size = 4096; + } } delete [] mesh_host; } - }; // Register the CPU kernels. 
diff --git a/source/op/descrpt_se_r_multi_device.cc b/source/op/descrpt_se_r_multi_device.cc index b94f97d6e1..c355e34f12 100644 --- a/source/op/descrpt_se_r_multi_device.cc +++ b/source/op/descrpt_se_r_multi_device.cc @@ -18,6 +18,8 @@ REGISTER_OP("DescrptSeR") .Output("rij: T") .Output("nlist: int32"); +#define GPU_MAX_NBOR_SIZE 4096 + struct DeviceFunctor { void operator()(const CPUDevice& d, std::string& device) { device = "CPU"; @@ -147,14 +149,14 @@ class DescrptSeROp : public OpKernel { OP_REQUIRES_OK(context, context->allocate_temp(DT_INT32, int_shape, &int_temp)); Tensor uint64_temp; TensorShape uint64_shape; - uint64_shape.AddDim(nloc * max_nbor_size * 2); + uint64_shape.AddDim(nloc * GPU_MAX_NBOR_SIZE * 2); OP_REQUIRES_OK(context, context->allocate_temp(DT_UINT64, uint64_shape, &uint64_temp)); array_int = int_temp.flat().data(); array_longlong = uint64_temp.flat().data(); nbor_update(mesh_tensor.flat().data(), static_cast(mesh_tensor.NumElements())); - OP_REQUIRES (context, (max_nbor_size <= 4096), errors::InvalidArgument ("Assert failed, max neighbor size of atom(lammps) " + std::to_string(max_nbor_size) + " is larger than 4096, which currently is not supported by deepmd-kit.")); + OP_REQUIRES (context, (max_nbor_size <= GPU_MAX_NBOR_SIZE), errors::InvalidArgument ("Assert failed, max neighbor size of atom(lammps) " + std::to_string(max_nbor_size) + " is larger than 4096, which currently is not supported by deepmd-kit.")); } else if (device == "CPU") { memcpy (&ilist, 4 + mesh_tensor.flat().data(), sizeof(int *)); @@ -256,10 +258,20 @@ class DescrptSeROp : public OpKernel { cudaErrcheck(cudaMemcpy(jrange, jrange_host, sizeof(int) * mesh_host[2], cudaMemcpyHostToDevice)); cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); - max_nbor_size = 1024; + max_nbor_size = 0; for(int ii = 0; ii < mesh_host[2]; ii++) { max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? 
(jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; } + assert(max_nbor_size <= GPU_MAX_NBOR_SIZE); + if (max_nbor_size <= 1024) { + max_nbor_size = 1024; + } + else if (max_nbor_size <= 2048) { + max_nbor_size = 2048; + } + else { + max_nbor_size = 4096; + } } delete [] mesh_host; } From 7d259972125924c78d57602ebc436d987a535c71 Mon Sep 17 00:00:00 2001 From: denghuilu Date: Mon, 8 Mar 2021 17:45:26 +0800 Subject: [PATCH 16/55] fix bug of illegal device memory access --- source/op/descrpt_se_a_multi_device.cc | 22 +++++++--------------- source/op/descrpt_se_r_multi_device.cc | 22 +++++++--------------- 2 files changed, 14 insertions(+), 30 deletions(-) diff --git a/source/op/descrpt_se_a_multi_device.cc b/source/op/descrpt_se_a_multi_device.cc index 141b2d89bc..40f2c92eb0 100644 --- a/source/op/descrpt_se_a_multi_device.cc +++ b/source/op/descrpt_se_a_multi_device.cc @@ -237,32 +237,23 @@ class DescrptSeAOp : public OpKernel { int *mesh_host = new int[size], *ilist_host = NULL, *jrange_host = NULL, *jlist_host = NULL; cudaErrcheck(cudaMemcpy(mesh_host, mesh, sizeof(int) * size, cudaMemcpyDeviceToHost)); memcpy (&ilist_host, 4 + mesh_host, sizeof(int *)); - memcpy (&jrange_host, 8 + mesh_host, sizeof(int *)); - memcpy (&jlist_host, 12 + mesh_host, sizeof(int *)); + memcpy (&jrange_host, 8 + mesh_host, sizeof(int *)); + memcpy (&jlist_host, 12 + mesh_host, sizeof(int *)); int const ago = mesh_host[0]; - if (!init) { - ilist_size = (int)(mesh_host[1] * 1.2); - jrange_size = (int)(mesh_host[2] * 1.2); - jlist_size = (int)(mesh_host[3] * 1.2); - cudaErrcheck(cudaMalloc((void **)&ilist, sizeof(int) * ilist_size)); - cudaErrcheck(cudaMalloc((void **)&jrange, sizeof(int) * jrange_size)); - cudaErrcheck(cudaMalloc((void **)&jlist, sizeof(int) * jlist_size)); - init = true; - } - if (ago == 0) { + if (!init || ago == 0) { if (ilist_size < mesh_host[1]) { ilist_size = (int)(mesh_host[1] * 1.2); - cudaErrcheck(cudaFree(ilist)); + if (ilist != NULL) 
{cudaErrcheck(cudaFree(ilist));} cudaErrcheck(cudaMalloc((void **)&ilist, sizeof(int) * ilist_size)); } if (jrange_size < mesh_host[2]) { jrange_size = (int)(mesh_host[2] * 1.2); - cudaErrcheck(cudaFree(jrange)); + if (jrange != NULL) {cudaErrcheck(cudaFree(jrange));} cudaErrcheck(cudaMalloc((void **)&jrange,sizeof(int) * jrange_size)); } if (jlist_size < mesh_host[3]) { jlist_size = (int)(mesh_host[3] * 1.2); - cudaErrcheck(cudaFree(jlist)); + if (jlist != NULL) {cudaErrcheck(cudaFree(jlist));} cudaErrcheck(cudaMalloc((void **)&jlist, sizeof(int) * jlist_size)); } cudaErrcheck(cudaMemcpy(ilist, ilist_host, sizeof(int) * mesh_host[1], cudaMemcpyHostToDevice)); @@ -284,6 +275,7 @@ class DescrptSeAOp : public OpKernel { max_nbor_size = 4096; } } + init = true; delete [] mesh_host; } }; diff --git a/source/op/descrpt_se_r_multi_device.cc b/source/op/descrpt_se_r_multi_device.cc index c355e34f12..81d2603c79 100644 --- a/source/op/descrpt_se_r_multi_device.cc +++ b/source/op/descrpt_se_r_multi_device.cc @@ -226,32 +226,23 @@ class DescrptSeROp : public OpKernel { int *mesh_host = new int[size], *ilist_host = NULL, *jrange_host = NULL, *jlist_host = NULL; cudaErrcheck(cudaMemcpy(mesh_host, mesh, sizeof(int) * size, cudaMemcpyDeviceToHost)); memcpy (&ilist_host, 4 + mesh_host, sizeof(int *)); - memcpy (&jrange_host, 8 + mesh_host, sizeof(int *)); - memcpy (&jlist_host, 12 + mesh_host, sizeof(int *)); + memcpy (&jrange_host, 8 + mesh_host, sizeof(int *)); + memcpy (&jlist_host, 12 + mesh_host, sizeof(int *)); int const ago = mesh_host[0]; - if (!init) { - ilist_size = (int)(mesh_host[1] * 1.2); - jrange_size = (int)(mesh_host[2] * 1.2); - jlist_size = (int)(mesh_host[3] * 1.2); - cudaErrcheck(cudaMalloc((void **)&ilist, sizeof(int) * ilist_size)); - cudaErrcheck(cudaMalloc((void **)&jrange, sizeof(int) * jrange_size)); - cudaErrcheck(cudaMalloc((void **)&jlist, sizeof(int) * jlist_size)); - init = true; - } - if (ago == 0) { + if (!init || ago == 0) { if (ilist_size < 
mesh_host[1]) { ilist_size = (int)(mesh_host[1] * 1.2); - cudaErrcheck(cudaFree(ilist)); + if (ilist != NULL) {cudaErrcheck(cudaFree(ilist));} cudaErrcheck(cudaMalloc((void **)&ilist, sizeof(int) * ilist_size)); } if (jrange_size < mesh_host[2]) { jrange_size = (int)(mesh_host[2] * 1.2); - cudaErrcheck(cudaFree(jrange)); + if (jrange != NULL) {cudaErrcheck(cudaFree(jrange));} cudaErrcheck(cudaMalloc((void **)&jrange,sizeof(int) * jrange_size)); } if (jlist_size < mesh_host[3]) { jlist_size = (int)(mesh_host[3] * 1.2); - cudaErrcheck(cudaFree(jlist)); + if (jlist != NULL) {cudaErrcheck(cudaFree(jlist));} cudaErrcheck(cudaMalloc((void **)&jlist, sizeof(int) * jlist_size)); } cudaErrcheck(cudaMemcpy(ilist, ilist_host, sizeof(int) * mesh_host[1], cudaMemcpyHostToDevice)); @@ -273,6 +264,7 @@ class DescrptSeROp : public OpKernel { max_nbor_size = 4096; } } + init = true; delete [] mesh_host; } From 520c3089e216b9fd3cf883ee19b961bc10c5951e Mon Sep 17 00:00:00 2001 From: denghuilu Date: Fri, 19 Mar 2021 23:13:20 +0800 Subject: [PATCH 17/55] fix bug of max_nbor_size error --- source/op/descrpt_se_a_multi_device.cc | 2 +- source/op/descrpt_se_r_multi_device.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/op/descrpt_se_a_multi_device.cc b/source/op/descrpt_se_a_multi_device.cc index 40f2c92eb0..af18909884 100644 --- a/source/op/descrpt_se_a_multi_device.cc +++ b/source/op/descrpt_se_a_multi_device.cc @@ -261,7 +261,7 @@ class DescrptSeAOp : public OpKernel { cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); max_nbor_size = 0; - for(int ii = 0; ii < mesh_host[2]; ii++) { + for(int ii = 0; ii < mesh_host[2] - 1; ii++) { max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? 
(jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; } assert(max_nbor_size <= GPU_MAX_NBOR_SIZE); diff --git a/source/op/descrpt_se_r_multi_device.cc b/source/op/descrpt_se_r_multi_device.cc index 81d2603c79..a1f50fbb95 100644 --- a/source/op/descrpt_se_r_multi_device.cc +++ b/source/op/descrpt_se_r_multi_device.cc @@ -250,7 +250,7 @@ class DescrptSeROp : public OpKernel { cudaErrcheck(cudaMemcpy(jlist, jlist_host, sizeof(int) * mesh_host[3], cudaMemcpyHostToDevice)); max_nbor_size = 0; - for(int ii = 0; ii < mesh_host[2]; ii++) { + for(int ii = 0; ii < mesh_host[2] - 1; ii++) { max_nbor_size = (jrange_host[ii + 1] - jrange_host[ii]) > max_nbor_size ? (jrange_host[ii + 1] - jrange_host[ii]) : max_nbor_size; } assert(max_nbor_size <= GPU_MAX_NBOR_SIZE); From f0d60fc75de2eb7481e082fd772b73b31a917def Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Wed, 24 Mar 2021 11:45:11 +0800 Subject: [PATCH 18/55] Update issue templates Contact me: tuop@deepmd.net --- .github/ISSUE_TEMPLATE/doc-issue-template.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/doc-issue-template.md diff --git a/.github/ISSUE_TEMPLATE/doc-issue-template.md b/.github/ISSUE_TEMPLATE/doc-issue-template.md new file mode 100644 index 0000000000..be8f103e7e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/doc-issue-template.md @@ -0,0 +1,10 @@ +--- +name: Doc issue template +about: Share your experience by writing a doc. +title: "[Doc]" +labels: '' +assignees: '' + +--- + +Do you want to contribute to our community? Start by writing down and share your experience with DP. 
From b28c85423bf7cf39ca4422e6e887ee1b00fbc4ef Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Wed, 24 Mar 2021 11:48:45 +0800 Subject: [PATCH 19/55] Update issue templates --- .github/ISSUE_TEMPLATE/doc-issue-template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/doc-issue-template.md b/.github/ISSUE_TEMPLATE/doc-issue-template.md index be8f103e7e..be2c368c5f 100644 --- a/.github/ISSUE_TEMPLATE/doc-issue-template.md +++ b/.github/ISSUE_TEMPLATE/doc-issue-template.md @@ -7,4 +7,4 @@ assignees: '' --- -Do you want to contribute to our community? Start by writing down and share your experience with DP. +Do you want to contribute to our community? Start by writing down and share your experience using DP. From e16c82c540457337dd9e3531b1ec5f02d29e730e Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Wed, 24 Mar 2021 13:13:30 +0800 Subject: [PATCH 20/55] Update issue templates --- .github/ISSUE_TEMPLATE/doc-issue-template.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/doc-issue-template.md b/.github/ISSUE_TEMPLATE/doc-issue-template.md index be2c368c5f..209dbb7977 100644 --- a/.github/ISSUE_TEMPLATE/doc-issue-template.md +++ b/.github/ISSUE_TEMPLATE/doc-issue-template.md @@ -8,3 +8,11 @@ assignees: '' --- Do you want to contribute to our community? Start by writing down and share your experience using DP. +!!! Note: tag a label to your issue. +Bug: for bug report +Enhancement: for enhancement suggestions +New feature: for adding feature suggestions +Doc: for manual demandings +There are other specific wont-help type issues for you to choose. If none suit your need, then check other questions. 
+ +We will check each issue solved (The problem is solved), running (We are on it.), later (We will fix it in the next version), wontfix (We might fix it in the future), invalid (What you reported is not actually a bug) or duplicate (The question has been asked before) regularly. From 7ba694b0929c0767809117af97c42e6b347bf346 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 25 Mar 2021 12:56:34 -0400 Subject: [PATCH 21/55] give the correct tensorflow version in the document --- doc/install.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/install.md b/doc/install.md index 05a0d3bdb0..783abf65be 100644 --- a/doc/install.md +++ b/doc/install.md @@ -61,7 +61,7 @@ We follow the virtual environment approach to install the tensorflow's Python in virtualenv -p python3 $tensorflow_venv source $tensorflow_venv/bin/activate pip install --upgrade pip -pip install --upgrade tensorflow==2.3.0 +pip install --upgrade tensorflow ``` It is highly recommanded to keep the consistency of the TensorFlow version for the python and C++ interfaces. 
Everytime a new shell is started and one wants to use `DeePMD-kit`, the virtual environment should be activated by @@ -78,7 +78,7 @@ virtualenv -p python3.7 $tensorflow_venv ``` If one does not need the GPU support of deepmd-kit and is concerned about package size, the CPU-only version of tensorflow should be installed by ```bash -pip install --upgrade tensorflow-cpu==2.3.0 +pip install --upgrade tensorflow-cpu ``` To verify the installation, run ```bash From c1948a2daa79a291c699cc78abe2c8385d52b820 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Fri, 26 Mar 2021 16:08:58 +0800 Subject: [PATCH 22/55] Update issue templates --- .github/ISSUE_TEMPLATE/custom.md | 10 ++++++++++ .github/ISSUE_TEMPLATE/doc-issue-template.md | 18 ------------------ 2 files changed, 10 insertions(+), 18 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/custom.md delete mode 100644 .github/ISSUE_TEMPLATE/doc-issue-template.md diff --git a/.github/ISSUE_TEMPLATE/custom.md b/.github/ISSUE_TEMPLATE/custom.md new file mode 100644 index 0000000000..47cd8a10dc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/custom.md @@ -0,0 +1,10 @@ +--- +name: Custom issue template +about: Describe this issue template's purpose here. +title: '' +labels: '' +assignees: '' + +--- + +Please tag a label to your issue. So we can locate people to solve your problem sooner. diff --git a/.github/ISSUE_TEMPLATE/doc-issue-template.md b/.github/ISSUE_TEMPLATE/doc-issue-template.md deleted file mode 100644 index 209dbb7977..0000000000 --- a/.github/ISSUE_TEMPLATE/doc-issue-template.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -name: Doc issue template -about: Share your experience by writing a doc. -title: "[Doc]" -labels: '' -assignees: '' - ---- - -Do you want to contribute to our community? Start by writing down and share your experience using DP. -!!! Note: tag a label to your issue. 
-Bug: for bug report -Enhancement: for enhancement suggestions -New feature: for adding feature suggestions -Doc: for manual demandings -There are other specific wont-help type issues for you to choose. If none suit your need, then check other questions. - -We will check each issue solved (The problem is solved), running (We are on it.), later (We will fix it in the next version), wontfix (We might fix it in the future), invalid (What you reported is not actually a bug) or duplicate (The question has been asked before) regularly. From a066347fa2911c4858f7cf200fdb3fd2e49b3aaa Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Sun, 28 Mar 2021 18:43:46 +0800 Subject: [PATCH 23/55] Update issue templates --- .github/ISSUE_TEMPLATE/custom.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/custom.md b/.github/ISSUE_TEMPLATE/custom.md index 47cd8a10dc..437e66cebf 100644 --- a/.github/ISSUE_TEMPLATE/custom.md +++ b/.github/ISSUE_TEMPLATE/custom.md @@ -7,4 +7,6 @@ assignees: '' --- -Please tag a label to your issue. So we can locate people to solve your problem sooner. +Issue session is for enhancement suggestions. If your question is otherwise, please go to the discussion session. Thank you for your support. And look forward to working together. 
+ +From DeepModeling Community From bc78300e10314485c24fa49f4bd830104f7b30cb Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:21:56 +0800 Subject: [PATCH 24/55] Update issue templates --- .github/ISSUE_TEMPLATE/issue--bug-report.md | 27 +++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/issue--bug-report.md diff --git a/.github/ISSUE_TEMPLATE/issue--bug-report.md b/.github/ISSUE_TEMPLATE/issue--bug-report.md new file mode 100644 index 0000000000..c72e925017 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue--bug-report.md @@ -0,0 +1,27 @@ +--- +name: 'Issue: Bug report' +about: Create a bug report to help us eliminate issues and improve deepmd-kit. If + this doesn’t look right, [choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose). +title: "[BUG] _Replace With Suitable Title_" +labels: bug +assignees: '' + +--- + +**Summary** + + + +**Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** + + + + + +**Steps to Reproduce** + + + +**Further Information, Files, and Links** + + From f82097f54a77d38ba5355251365a6bc8cd3f2edf Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:24:40 +0800 Subject: [PATCH 25/55] Update issue templates --- .github/ISSUE_TEMPLATE/custom.md | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/custom.md diff --git a/.github/ISSUE_TEMPLATE/custom.md b/.github/ISSUE_TEMPLATE/custom.md deleted file mode 100644 index 437e66cebf..0000000000 --- a/.github/ISSUE_TEMPLATE/custom.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -name: Custom issue template -about: Describe this issue template's purpose here. -title: '' -labels: '' -assignees: '' - ---- - -Issue session is for enhancement suggestions. If your question is otherwise, please go to the discussion session. Thank you for your support. 
And look forward to working together. - -From DeepModeling Community From 5e60d189afe0eb4a07d307df1e05f164e32279b4 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:29:04 +0800 Subject: [PATCH 26/55] Update issue templates --- .github/ISSUE_TEMPLATE/issue--bug-report.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/issue--bug-report.md b/.github/ISSUE_TEMPLATE/issue--bug-report.md index c72e925017..488485c60b 100644 --- a/.github/ISSUE_TEMPLATE/issue--bug-report.md +++ b/.github/ISSUE_TEMPLATE/issue--bug-report.md @@ -4,12 +4,12 @@ about: Create a bug report to help us eliminate issues and improve deepmd-kit. I this doesn’t look right, [choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose). title: "[BUG] _Replace With Suitable Title_" labels: bug -assignees: '' +assignees: njzjz --- **Summary** - +[choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose) **Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** From 0bc94cc4b038fe90cda7d3cd699e2d6c0b98258d Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:31:15 +0800 Subject: [PATCH 27/55] Update issue templates --- .github/ISSUE_TEMPLATE/issue--bug-report.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/issue--bug-report.md b/.github/ISSUE_TEMPLATE/issue--bug-report.md index 488485c60b..c72e925017 100644 --- a/.github/ISSUE_TEMPLATE/issue--bug-report.md +++ b/.github/ISSUE_TEMPLATE/issue--bug-report.md @@ -4,12 +4,12 @@ about: Create a bug report to help us eliminate issues and improve deepmd-kit. I this doesn’t look right, [choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose). 
title: "[BUG] _Replace With Suitable Title_" labels: bug -assignees: njzjz +assignees: '' --- **Summary** -[choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose) + **Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** From 46095bc15f9c4a92c8f6232ecec3b1190497bed4 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:36:51 +0800 Subject: [PATCH 28/55] Update issue templates --- .../ISSUE_TEMPLATE/issue--feature-request.md | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/issue--feature-request.md diff --git a/.github/ISSUE_TEMPLATE/issue--feature-request.md b/.github/ISSUE_TEMPLATE/issue--feature-request.md new file mode 100644 index 0000000000..6d26779f1f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue--feature-request.md @@ -0,0 +1,21 @@ +--- +name: 'Issue: Feature request' +about: Suggest an idea for this project. 
If this doesn’t work right, [choose a different + type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose) +title: "[Feature Request] _Replace with Title_" +labels: new feature +assignees: '' + +--- + +**Summary** + + + +**Detailed Description** + + + +**Further Information, Files, and Links** + + From a3b620a5bdf0a86f5cf716c75113eb88e250c7bf Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:50:33 +0800 Subject: [PATCH 29/55] Update issue templates --- .../issue--about-input-files.md | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/issue--about-input-files.md diff --git a/.github/ISSUE_TEMPLATE/issue--about-input-files.md b/.github/ISSUE_TEMPLATE/issue--about-input-files.md new file mode 100644 index 0000000000..c8b6276db8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue--about-input-files.md @@ -0,0 +1,29 @@ +--- +name: 'Issue: About input files' +about: For issues that do not fit any of the other categories. If this doesn’t work + right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). 
+title: "[About input files] _Replace With Suitable Title_" +labels: document +assignees: '' + +--- + +**Summary** + + + +**Expected Behavior** + + + +**Actual Behavior** + + + +**The material system in question** + + + +**Further Information, Files, and Links** + + From 920f4bb98c8d40f90475d610124627dc3a2cd6b9 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 08:51:45 +0800 Subject: [PATCH 30/55] Update issue templates --- .github/ISSUE_TEMPLATE/about-input-files.md | 29 +++++++++++++++++++++ .github/ISSUE_TEMPLATE/bug-report.md | 27 +++++++++++++++++++ .github/ISSUE_TEMPLATE/feature-request.md | 21 +++++++++++++++ 3 files changed, 77 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/about-input-files.md create mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request.md diff --git a/.github/ISSUE_TEMPLATE/about-input-files.md b/.github/ISSUE_TEMPLATE/about-input-files.md new file mode 100644 index 0000000000..4bcc4b7bb5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/about-input-files.md @@ -0,0 +1,29 @@ +--- +name: About input files +about: For issues that do not fit any of the other categories. If this doesn’t work + right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). +title: "[About input files] _Replace With Suitable Title_" +labels: document +assignees: '' + +--- + +**Summary** + + + +**Expected Behavior** + + + +**Actual Behavior** + + + +**The material system in question** + + + +**Further Information, Files, and Links** + + diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000..9aa06346e6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,27 @@ +--- +name: Bug report +about: Create a bug report to help us eliminate issues and improve deepmd-kit. 
If + this doesn’t look right, [choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose). +title: "[BUG] _Replace With Suitable Title_" +labels: bug +assignees: '' + +--- + +**Summary** + + + +**Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** + + + + + +**Steps to Reproduce** + + + +**Further Information, Files, and Links** + + diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000..2ca72d3932 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,21 @@ +--- +name: Feature request +about: Suggest an idea for this project. If this doesn’t work right, [choose a different + type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose) +title: "[Feature Request] _Replace with Title_" +labels: new feature +assignees: '' + +--- + +**Summary** + + + +**Detailed Description** + + + +**Further Information, Files, and Links** + + From 3eb7af24cf12f1b1e956d405b7e0b0207f1869f4 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 09:06:18 +0800 Subject: [PATCH 31/55] Update issue templates --- .github/ISSUE_TEMPLATE/generic-issue.md | 23 +++++++++++++++ .../issue--about-input-files.md | 29 ------------------- .github/ISSUE_TEMPLATE/issue--bug-report.md | 27 ----------------- .../ISSUE_TEMPLATE/issue--feature-request.md | 21 -------------- .github/ISSUE_TEMPLATE/parameters.md | 25 ++++++++++++++++ 5 files changed, 48 insertions(+), 77 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/generic-issue.md delete mode 100644 .github/ISSUE_TEMPLATE/issue--about-input-files.md delete mode 100644 .github/ISSUE_TEMPLATE/issue--bug-report.md delete mode 100644 .github/ISSUE_TEMPLATE/issue--feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/parameters.md diff --git a/.github/ISSUE_TEMPLATE/generic-issue.md b/.github/ISSUE_TEMPLATE/generic-issue.md new 
file mode 100644 index 0000000000..8be5cbd9c0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/generic-issue.md @@ -0,0 +1,23 @@ +--- +name: Generic issue +about: For issues that do not fit any of the other categories. If this doesn’t work + right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). +title: _Replace With a Descriptive Title_ +labels: wontfix +assignees: '' + +--- + +**Summary** + + + +**Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** + + + + + +**Details** + + diff --git a/.github/ISSUE_TEMPLATE/issue--about-input-files.md b/.github/ISSUE_TEMPLATE/issue--about-input-files.md deleted file mode 100644 index c8b6276db8..0000000000 --- a/.github/ISSUE_TEMPLATE/issue--about-input-files.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -name: 'Issue: About input files' -about: For issues that do not fit any of the other categories. If this doesn’t work - right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). -title: "[About input files] _Replace With Suitable Title_" -labels: document -assignees: '' - ---- - -**Summary** - - - -**Expected Behavior** - - - -**Actual Behavior** - - - -**The material system in question** - - - -**Further Information, Files, and Links** - - diff --git a/.github/ISSUE_TEMPLATE/issue--bug-report.md b/.github/ISSUE_TEMPLATE/issue--bug-report.md deleted file mode 100644 index c72e925017..0000000000 --- a/.github/ISSUE_TEMPLATE/issue--bug-report.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: 'Issue: Bug report' -about: Create a bug report to help us eliminate issues and improve deepmd-kit. If - this doesn’t look right, [choose a different type](https://github.com/deepmodeling/deepmd-kit/issues/new/choose). 
-title: "[BUG] _Replace With Suitable Title_" -labels: bug -assignees: '' - ---- - -**Summary** - - - -**Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** - - - - - -**Steps to Reproduce** - - - -**Further Information, Files, and Links** - - diff --git a/.github/ISSUE_TEMPLATE/issue--feature-request.md b/.github/ISSUE_TEMPLATE/issue--feature-request.md deleted file mode 100644 index 6d26779f1f..0000000000 --- a/.github/ISSUE_TEMPLATE/issue--feature-request.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -name: 'Issue: Feature request' -about: Suggest an idea for this project. If this doesn’t work right, [choose a different - type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose) -title: "[Feature Request] _Replace with Title_" -labels: new feature -assignees: '' - ---- - -**Summary** - - - -**Detailed Description** - - - -**Further Information, Files, and Links** - - diff --git a/.github/ISSUE_TEMPLATE/parameters.md b/.github/ISSUE_TEMPLATE/parameters.md new file mode 100644 index 0000000000..166574eac8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/parameters.md @@ -0,0 +1,25 @@ +--- +name: Parameters +about: Make a suggestion for a change of input parameters or a new output to deepmd-kit. + If this doesn’t work right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). 
+title: "[About input files] _Replace With Suitable Title_" +labels: document, enhancement +assignees: '' + +--- + +**Summary** + + + +**Summary** + + + +**Detailed Description** + + + +**Further Information, Files, and Links** + + From 6cafa9a39f3c3990a6788d98d8142846c848b25a Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 09:13:07 +0800 Subject: [PATCH 32/55] Update issue templates --- .github/ISSUE_TEMPLATE/about-input-files.md | 29 --------------------- .github/ISSUE_TEMPLATE/request-for-help.md | 21 +++++++++++++++ 2 files changed, 21 insertions(+), 29 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/about-input-files.md create mode 100644 .github/ISSUE_TEMPLATE/request-for-help.md diff --git a/.github/ISSUE_TEMPLATE/about-input-files.md b/.github/ISSUE_TEMPLATE/about-input-files.md deleted file mode 100644 index 4bcc4b7bb5..0000000000 --- a/.github/ISSUE_TEMPLATE/about-input-files.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -name: About input files -about: For issues that do not fit any of the other categories. If this doesn’t work - right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). -title: "[About input files] _Replace With Suitable Title_" -labels: document -assignees: '' - ---- - -**Summary** - - - -**Expected Behavior** - - - -**Actual Behavior** - - - -**The material system in question** - - - -**Further Information, Files, and Links** - - diff --git a/.github/ISSUE_TEMPLATE/request-for-help.md b/.github/ISSUE_TEMPLATE/request-for-help.md new file mode 100644 index 0000000000..daeb6a85a6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/request-for-help.md @@ -0,0 +1,21 @@ +--- +name: Request for Help +about: Don't post help requests here, go to [discussions](https://github.com/deepmodeling/deepmd-kit/discussions) + instead. 
+title: '' +labels: '' +assignees: '' + +--- + +Before asking questions, you can + +search the previous issues or discussions +check the [document](https://deepmd.readthedocs.io/en/stable), especially [training parameters](https://deepmd.readthedocs.io/en/stable/train-input.html). + +Please **do not** post requests for help (e.g. with installing or using deepmd-kit) here. +Instead go to [discussions](https://github.com/deepmodeling/deepmd-kit/discussions). + +This issue tracker is for tracking deepmd-kit development related issues only. + +Thanks for your cooperation. From c40414152b57a349351711bb1388183218767242 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 09:53:48 +0800 Subject: [PATCH 33/55] Update issue templates --- .github/ISSUE_TEMPLATE/parameters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/parameters.md b/.github/ISSUE_TEMPLATE/parameters.md index 166574eac8..c11849380e 100644 --- a/.github/ISSUE_TEMPLATE/parameters.md +++ b/.github/ISSUE_TEMPLATE/parameters.md @@ -2,7 +2,7 @@ name: Parameters about: Make a suggestion for a change of input parameters or a new output to deepmd-kit. If this doesn’t work right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). 
-title: "[About input files] _Replace With Suitable Title_" +title: "[Parameters] _Replace With Suitable Title_" labels: document, enhancement assignees: '' From fc6f860a9b05f9aaaf04e45c108806309907b2ec Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 10:05:38 +0800 Subject: [PATCH 34/55] Update issue templates --- .github/ISSUE_TEMPLATE/generic-issue.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/generic-issue.md b/.github/ISSUE_TEMPLATE/generic-issue.md index 8be5cbd9c0..57ff296bef 100644 --- a/.github/ISSUE_TEMPLATE/generic-issue.md +++ b/.github/ISSUE_TEMPLATE/generic-issue.md @@ -10,7 +10,7 @@ assignees: '' **Summary** - + **Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** From 6e9e553891976f19a61267ace4ae0b49e055c74a Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 10:22:17 +0800 Subject: [PATCH 35/55] Update issue templates --- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 9aa06346e6..5e115bf029 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -16,7 +16,7 @@ assignees: '' - + **Steps to Reproduce** From a86984093dfe789e2586f82338d2de8d4300c856 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 10:23:44 +0800 Subject: [PATCH 36/55] Update issue templates --- .github/ISSUE_TEMPLATE/feature-request.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 2ca72d3932..00e939342c 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -14,7 +14,7 @@ assignees: '' **Detailed 
Description** - + **Further Information, Files, and Links** From 512cf02cd4a9d64d9b34b099595ff46c1fe3fa72 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Thu, 1 Apr 2021 10:25:12 +0800 Subject: [PATCH 37/55] Update issue templates --- .github/ISSUE_TEMPLATE/generic-issue.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/generic-issue.md b/.github/ISSUE_TEMPLATE/generic-issue.md index 57ff296bef..00b0913afb 100644 --- a/.github/ISSUE_TEMPLATE/generic-issue.md +++ b/.github/ISSUE_TEMPLATE/generic-issue.md @@ -16,7 +16,7 @@ assignees: '' - + **Details** From 1550bc384a74d92808f47fec16fc65c7c00b840d Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Fri, 9 Apr 2021 08:45:07 +0800 Subject: [PATCH 38/55] Update issue templates --- .github/ISSUE_TEMPLATE/bug-report.md | 4 ++-- .github/ISSUE_TEMPLATE/feature-request.md | 2 +- .github/ISSUE_TEMPLATE/parameters.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 5e115bf029..a56dd16a9c 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -12,9 +12,9 @@ assignees: '' -**Deepmd-kit Version, Python Version, Tensorflow Version, GCC Version and Cuda Version.** +**AS DETAILED AS POSSIBLE.** - + diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 00e939342c..ec57b4605e 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -3,7 +3,7 @@ name: Feature request about: Suggest an idea for this project. 
If this doesn’t work right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose) title: "[Feature Request] _Replace with Title_" -labels: new feature +labels: '' assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/parameters.md b/.github/ISSUE_TEMPLATE/parameters.md index c11849380e..1a9c5dbcaa 100644 --- a/.github/ISSUE_TEMPLATE/parameters.md +++ b/.github/ISSUE_TEMPLATE/parameters.md @@ -3,7 +3,7 @@ name: Parameters about: Make a suggestion for a change of input parameters or a new output to deepmd-kit. If this doesn’t work right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). title: "[Parameters] _Replace With Suitable Title_" -labels: document, enhancement +labels: enhancement assignees: '' --- From 95a80571d14a3e7add4299426b80fa83a6553319 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Fri, 9 Apr 2021 09:08:21 +0800 Subject: [PATCH 39/55] Update issue templates --- .github/ISSUE_TEMPLATE/feature-request.md | 2 +- .github/ISSUE_TEMPLATE/parameters.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index ec57b4605e..d099345615 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -3,7 +3,7 @@ name: Feature request about: Suggest an idea for this project. 
If this doesn’t work right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose) title: "[Feature Request] _Replace with Title_" -labels: '' +labels: enhancement assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/parameters.md b/.github/ISSUE_TEMPLATE/parameters.md index 1a9c5dbcaa..ff9188f06e 100644 --- a/.github/ISSUE_TEMPLATE/parameters.md +++ b/.github/ISSUE_TEMPLATE/parameters.md @@ -3,7 +3,7 @@ name: Parameters about: Make a suggestion for a change of input parameters or a new output to deepmd-kit. If this doesn’t work right, [choose a different type]( https://github.com/deepmodeling/deepmd-kit/issues/new/choose). title: "[Parameters] _Replace With Suitable Title_" -labels: enhancement +labels: documentation, enhancement assignees: '' --- From 5c764165e83031089ce053fdef4b68adadeacc25 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Fri, 9 Apr 2021 15:27:51 +0800 Subject: [PATCH 40/55] Update issue templates --- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index a56dd16a9c..3b6d0b60d7 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -12,7 +12,7 @@ assignees: '' -**AS DETAILED AS POSSIBLE.** +**Deepmd-kit version, installation way, input file, running commands, error log, etc.** From f66c78ec3ac21248d2a0dc0427f8d87d503e9636 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 19 Apr 2021 19:25:00 -0400 Subject: [PATCH 41/55] Do not find protobuf for python In conda-forge's recent released tensorflow, protobuf is external from tensorflow and not inside the tensorflow directory. 
Clang doesn't support this flag, causing an error on macOS:
--- source/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index dc35ee5dc0..b92ea9574d 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -1,6 +1,8 @@ cmake_minimum_required(VERSION 3.7) project(DeePMD) -set(CMAKE_LINK_WHAT_YOU_USE TRUE) +if (CMAKE_COMPILER_IS_GNU) + set(CMAKE_LINK_WHAT_YOU_USE TRUE) +endif () # build cpp or python interfaces if (NOT DEFINED BUILD_CPP_IF) From 58d775aca715fc91a08d0a8743886c687be8d3dc Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 24 Apr 2021 03:41:38 -0400 Subject: [PATCH 43/55] add more badges --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 2ce48317fd..b383d7c55f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,11 @@ DeePMD-kit Manual ======== [![GitHub release](https://img.shields.io/github/release/deepmodeling/deepmd-kit.svg?maxAge=86400)](https://github.com/deepmodeling/deepmd-kit/releases) +[![doi:10.1016/j.cpc.2018.03.016](https://img.shields.io/badge/DOI-10.1016%2Fj.cpc.2018.03.016-blue)](https://doi.org/10.1016/j.cpc.2020.107206) +[![offline packages](https://img.shields.io/github/downloads/deepmodeling/deepmd-kit/total?label=offline%20packages)](https://github.com/deepmodeling/deepmd-kit/releases) +[![conda install](https://img.shields.io/badge/downloads-9k%20total-green.svg?style=round-square&label=conda%20install)](https://anaconda.org/deepmodeling/deepmd-kit) +[![pip install](https://img.shields.io/pypi/dm/deepmd-kit?label=pip%20install)](https://pypi.org/project/deepmd-kit) +[![docker pull](https://img.shields.io/docker/pulls/deepmodeling/deepmd-kit)](https://hub.docker.com/r/deepmodeling/deepmd-kit) [![Documentation Status](https://readthedocs.org/projects/deepmd/badge/?version=latest)](https://deepmd.readthedocs.io/en/latest/?badge=latest) # Table of contents From 1e6dae6507975d1010ba8d5f531e1abd0242a6b2 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 24 Apr 
2021 04:04:26 -0400 Subject: [PATCH 44/55] fix the document badge link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b383d7c55f..77c867bc49 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ [![conda install](https://img.shields.io/badge/downloads-9k%20total-green.svg?style=round-square&label=conda%20install)](https://anaconda.org/deepmodeling/deepmd-kit) [![pip install](https://img.shields.io/pypi/dm/deepmd-kit?label=pip%20install)](https://pypi.org/project/deepmd-kit) [![docker pull](https://img.shields.io/docker/pulls/deepmodeling/deepmd-kit)](https://hub.docker.com/r/deepmodeling/deepmd-kit) -[![Documentation Status](https://readthedocs.org/projects/deepmd/badge/?version=latest)](https://deepmd.readthedocs.io/en/latest/?badge=latest) +[![Documentation Status](https://readthedocs.org/projects/deepmd/badge/)](https://deepmd.readthedocs.io/) # Table of contents - [About DeePMD-kit](#about-deepmd-kit) From 816b803f33e12559b9bc335e45de3310dafcc3fc Mon Sep 17 00:00:00 2001 From: Han Wang Date: Sat, 8 May 2021 09:01:50 +0800 Subject: [PATCH 45/55] fix issue #598 --- source/lmp/pair_nnp.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/lmp/pair_nnp.cpp b/source/lmp/pair_nnp.cpp index b5a64e8b11..f472e0e81e 100644 --- a/source/lmp/pair_nnp.cpp +++ b/source/lmp/pair_nnp.cpp @@ -482,10 +482,10 @@ void PairNNP::compute(int eflag, int vflag) nnp_inter_model_devi.compute_avg (tmp_avg_f_, all_force_); nnp_inter_model_devi.compute_std_f (std_f_, tmp_avg_f_, all_force_); std_f.resize(std_f_.size()); - for (int dd = 0; dd < std_f_.size(); ++dd) std_f[dd] = std_f_[dd]; if (out_rel == 1){ nnp_inter_model_devi.compute_relative_std_f (std_f_, tmp_avg_f_, eps); } + for (int dd = 0; dd < std_f_.size(); ++dd) std_f[dd] = std_f_[dd]; #endif double min = numeric_limits::max(), max = 0, avg = 0; ana_st(max, min, avg, std_f, nlocal); From 495270a82d1da10445a87424a2108a6d8c8da6df Mon Sep 17 00:00:00 
This ensures the out file will not be overwritten when LAMMPS restarts.
Add a request not to use images to show error logs.
write_restart(FILE *); + virtual void read_restart(FILE *); double init_one(int i, int j); int pack_reverse_comm(int, int, double *); void unpack_reverse_comm(int, int *, double *); @@ -85,6 +87,7 @@ private: bool single_model; bool multi_models_mod_devi; bool multi_models_no_mod_devi; + bool is_restart; #ifdef HIGH_PREC vector fparam; vector aparam; From ceb07e537ee01f473478463dd22b79dd4479c6e5 Mon Sep 17 00:00:00 2001 From: tuoping <80671886+tuoping@users.noreply.github.com> Date: Tue, 25 May 2021 09:55:46 +0800 Subject: [PATCH 48/55] Update bug-report.md Add request not to use image to show error log. --- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 3b6d0b60d7..80c0ef7a13 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -14,7 +14,7 @@ assignees: '' **Deepmd-kit version, installation way, input file, running commands, error log, etc.** - + From 91f2ffcc41470950beac7ecad849494d51964cd9 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sat, 29 May 2021 06:08:54 -0400 Subject: [PATCH 49/55] fix class name in #641 --- source/lmp/pair_nnp.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/lmp/pair_nnp.cpp b/source/lmp/pair_nnp.cpp index 5d3cfbc691..4a2b0dd8d1 100644 --- a/source/lmp/pair_nnp.cpp +++ b/source/lmp/pair_nnp.cpp @@ -806,12 +806,12 @@ void PairNNP::settings(int narg, char **arg) all_force.resize(numb_models); } -void PairDeepMD::read_restart(FILE *) +void PairNNP::read_restart(FILE *) { is_restart = true; } -void PairDeepMD::write_restart(FILE *) +void PairNNP::write_restart(FILE *) { // pass } From 23c59e1a2a9fe7e3d57ea210726c7e890fdc196f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 8 Aug 2021 19:37:46 -0400 Subject: [PATCH 50/55] add aliases to Arguments (#932) fix #846. 
--- source/train/argcheck.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/train/argcheck.py b/source/train/argcheck.py index c358c114ff..931782d30f 100644 --- a/source/train/argcheck.py +++ b/source/train/argcheck.py @@ -60,7 +60,7 @@ def descrpt_se_a_args(): Argument("rcut", float, optional = True, default = 6.0, doc = doc_rcut), Argument("rcut_smth", float, optional = True, default = 0.5, doc = doc_rcut_smth), Argument("neuron", list, optional = True, default = [10,20,40], doc = doc_neuron), - Argument("axis_neuron", int, optional = True, default = 4, doc = doc_axis_neuron), + Argument("axis_neuron", int, optional = True, default = 4, n_axis_neuron = ['n_axis_neuron'], doc = doc_axis_neuron), Argument("activation_function", str, optional = True, default = 'tanh', doc = doc_activation_function), Argument("resnet_dt", bool, optional = True, default = False, doc = doc_resnet_dt), Argument("type_one_side", bool, optional = True, default = False, doc = doc_type_one_side), @@ -146,7 +146,7 @@ def fitting_ener(): return [ Argument("numb_fparam", int, optional = True, default = 0, doc = doc_numb_fparam), Argument("numb_aparam", int, optional = True, default = 0, doc = doc_numb_aparam), - Argument("neuron", list, optional = True, default = [120,120,120], doc = doc_neuron), + Argument("neuron", list, optional = True, default = [120,120,120], alias = ['n_neuron'], doc = doc_neuron), Argument("activation_function", str, optional = True, default = 'tanh', doc = doc_activation_function), Argument("precision", str, optional = True, default = 'float64', doc = doc_precision), Argument("resnet_dt", bool, optional = True, default = True, doc = doc_resnet_dt), @@ -169,14 +169,14 @@ def fitting_polar(): doc_seed = 'Random seed for parameter initialization of the fitting net' return [ - Argument("neuron", list, optional = True, default = [120,120,120], doc = doc_neuron), + Argument("neuron", list, optional = True, default = [120,120,120], alias 
= ['n_neuron'], doc = doc_neuron), Argument("activation_function", str, optional = True, default = 'tanh', doc = doc_activation_function), Argument("resnet_dt", bool, optional = True, default = True, doc = doc_resnet_dt), Argument("precision", str, optional = True, default = 'float64', doc = doc_precision), Argument("fit_diag", bool, optional = True, default = True, doc = doc_fit_diag), Argument("scale", [list,float], optional = True, default = 1.0, doc = doc_scale), Argument("diag_shift", [list,float], optional = True, default = 0.0, doc = doc_diag_shift), - Argument("sel_type", [list,int,None], optional = True, doc = doc_sel_type), + Argument("sel_type", [list,int,None], optional = True, alias = ['pol_type'], doc = doc_sel_type), Argument("seed", [int,None], optional = True, doc = doc_seed) ] @@ -193,11 +193,11 @@ def fitting_dipole(): doc_sel_type = 'The atom types for which the atomic dipole will be provided. If not set, all types will be selected.' doc_seed = 'Random seed for parameter initialization of the fitting net' return [ - Argument("neuron", list, optional = True, default = [120,120,120], doc = doc_neuron), + Argument("neuron", list, optional = True, default = [120,120,120], alias = ['n_neuron'], doc = doc_neuron), Argument("activation_function", str, optional = True, default = 'tanh', doc = doc_activation_function), Argument("resnet_dt", bool, optional = True, default = True, doc = doc_resnet_dt), Argument("precision", str, optional = True, default = 'float64', doc = doc_precision), - Argument("sel_type", [list,int,None], optional = True, doc = doc_sel_type), + Argument("sel_type", [list,int,None], optional = True, alias = ['dipole_type'], doc = doc_sel_type), Argument("seed", [int,None], optional = True, doc = doc_seed) ] From d7e7dbbecc052316dca225dfe8a345fa39037472 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Sun, 8 Aug 2021 20:19:27 -0400 Subject: [PATCH 51/55] fix a typo in #932 (#934) * add aliases to Arguments fix #846. 
* fix a typo in #932 --- source/train/argcheck.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/train/argcheck.py b/source/train/argcheck.py index 931782d30f..2d72340da7 100644 --- a/source/train/argcheck.py +++ b/source/train/argcheck.py @@ -60,7 +60,7 @@ def descrpt_se_a_args(): Argument("rcut", float, optional = True, default = 6.0, doc = doc_rcut), Argument("rcut_smth", float, optional = True, default = 0.5, doc = doc_rcut_smth), Argument("neuron", list, optional = True, default = [10,20,40], doc = doc_neuron), - Argument("axis_neuron", int, optional = True, default = 4, n_axis_neuron = ['n_axis_neuron'], doc = doc_axis_neuron), + Argument("axis_neuron", int, optional = True, default = 4, alias = ['n_axis_neuron'], doc = doc_axis_neuron), Argument("activation_function", str, optional = True, default = 'tanh', doc = doc_activation_function), Argument("resnet_dt", bool, optional = True, default = False, doc = doc_resnet_dt), Argument("type_one_side", bool, optional = True, default = False, doc = doc_type_one_side), From c9fb8f485e23f64c319526b966346890aaa74ebb Mon Sep 17 00:00:00 2001 From: Han Wang Date: Mon, 9 Aug 2021 11:22:31 +0800 Subject: [PATCH 52/55] rm `load_ckpt` (#936) * rm load_ckpt * rm wrong files Co-authored-by: Han Wang --- doc/use-deepmd-kit.md | 4 ++-- examples/fparam/train/input.json | 2 +- examples/fparam/train/input_aparam.json | 2 +- examples/water/train/polar.json | 2 +- examples/water/train/polar_se_a.json | 2 +- examples/water/train/wannier.json | 2 +- examples/water/train/water.json | 2 +- examples/water/train/water_se_a.json | 2 +- examples/water/train/water_se_ar.json | 2 +- examples/water/train/water_se_r.json | 2 +- examples/water/train/water_srtab_example.json | 2 +- 11 files changed, 12 insertions(+), 12 deletions(-) diff --git a/doc/use-deepmd-kit.md b/doc/use-deepmd-kit.md index cbf7a91cbc..a6e3915b3b 100644 --- a/doc/use-deepmd-kit.md +++ b/doc/use-deepmd-kit.md @@ -156,7 +156,7 @@ An example of 
`training` is "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "profiling": false, @@ -189,7 +189,7 @@ During the training, the error of the model is tested every **`disp_freq`** batc ``` The first column displays the number of batches. The second and third columns display the loss function evaluated by `numb_test` frames randomly chosen from the test set and that evaluated by the current training batch, respectively. The fourth and fifth columns display the RMS energy error (normalized by number of atoms) evaluated by `numb_test` frames randomly chosen from the test set and that evaluated by the current training batch, respectively. The sixth and seventh columns display the RMS force error (component-wise) evaluated by `numb_test` frames randomly chosen from the test set and that evaluated by the current training batch, respectively. The last column displays the current learning rate. -Checkpoints will be written to files with prefix **`save_ckpt`** every **`save_freq`** batches. If **`restart`** is set to `true`, then the training will start from the checkpoint named **`load_ckpt`**, rather than from scratch. +Checkpoints will be written to files with prefix **`save_ckpt`** every **`save_freq`** batches. 
Several command line options can be passed to `dp train`, which can be checked with ```bash diff --git a/examples/fparam/train/input.json b/examples/fparam/train/input.json index c57afdfb7f..0455f11b42 100644 --- a/examples/fparam/train/input.json +++ b/examples/fparam/train/input.json @@ -51,7 +51,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "profiling": false, diff --git a/examples/fparam/train/input_aparam.json b/examples/fparam/train/input_aparam.json index 86be27ef29..5774130fe1 100644 --- a/examples/fparam/train/input_aparam.json +++ b/examples/fparam/train/input_aparam.json @@ -51,7 +51,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "profiling": false, diff --git a/examples/water/train/polar.json b/examples/water/train/polar.json index 60e3fa3494..3437747e49 100644 --- a/examples/water/train/polar.json +++ b/examples/water/train/polar.json @@ -53,7 +53,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "_comment": "that's all" diff --git a/examples/water/train/polar_se_a.json b/examples/water/train/polar_se_a.json index dc90e481ce..e0c2939722 100644 --- a/examples/water/train/polar_se_a.json +++ b/examples/water/train/polar_se_a.json @@ -51,7 +51,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "_comment": "that's all" diff --git a/examples/water/train/wannier.json b/examples/water/train/wannier.json index f23f5e0d62..06b3bc906d 100644 --- a/examples/water/train/wannier.json +++ b/examples/water/train/wannier.json @@ -54,7 +54,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "_comment": 
"that's all" diff --git a/examples/water/train/water.json b/examples/water/train/water.json index 23ba559aed..ccc47a5d3a 100644 --- a/examples/water/train/water.json +++ b/examples/water/train/water.json @@ -61,7 +61,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "_comment": "that's all" diff --git a/examples/water/train/water_se_a.json b/examples/water/train/water_se_a.json index cb005530c1..5c9b2d8e70 100644 --- a/examples/water/train/water_se_a.json +++ b/examples/water/train/water_se_a.json @@ -56,7 +56,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "profiling": false, diff --git a/examples/water/train/water_se_ar.json b/examples/water/train/water_se_ar.json index 2173f2e1d9..c97b3d1f70 100644 --- a/examples/water/train/water_se_ar.json +++ b/examples/water/train/water_se_ar.json @@ -67,7 +67,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "profiling": false, diff --git a/examples/water/train/water_se_r.json b/examples/water/train/water_se_r.json index 7faf55a3c3..3954e0b9aa 100644 --- a/examples/water/train/water_se_r.json +++ b/examples/water/train/water_se_r.json @@ -56,7 +56,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "profiling": false, diff --git a/examples/water/train/water_srtab_example.json b/examples/water/train/water_srtab_example.json index f2a0a4a39c..3baabd44fd 100644 --- a/examples/water/train/water_srtab_example.json +++ b/examples/water/train/water_srtab_example.json @@ -64,7 +64,7 @@ "numb_test": 10, "save_freq": 1000, "save_ckpt": "model.ckpt", - "load_ckpt": "model.ckpt", + "disp_training":true, "time_training":true, "_comment": "that's all" From 
90b1f5270d1e44a0b32870632438f1baec35763f Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Thu, 26 Aug 2021 08:58:10 -0400 Subject: [PATCH 53/55] Fix typo: CMAKE_COMPILER_IS_GNUCXX (#1038) (#1041) The flag won't work without language specified. See https://cmake.org/cmake/help/v3.4/variable/CMAKE_COMPILER_IS_GNULANG.html (cherry picked from commit 8bbe565c876d3e1424ac9840c466e502e98da528) Co-authored-by: Chun Cai --- source/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index b92ea9574d..556a7da17a 100644 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.7) project(DeePMD) -if (CMAKE_COMPILER_IS_GNU) +if (CMAKE_COMPILER_IS_GNUCXX) set(CMAKE_LINK_WHAT_YOU_USE TRUE) endif () From f923ee55748c7644011e1a3f3ce6122828e6f71a Mon Sep 17 00:00:00 2001 From: Han Wang Date: Fri, 27 Aug 2021 18:05:03 +0800 Subject: [PATCH 54/55] change to almostEqual in UT to avoid difference at FP precision --- source/tests/test_model_devi.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/tests/test_model_devi.py b/source/tests/test_model_devi.py index 3e5161b44d..4d5414cea8 100644 --- a/source/tests/test_model_devi.py +++ b/source/tests/test_model_devi.py @@ -36,11 +36,11 @@ def test_calc_model_devi(self): frequency=self.freq, nopbc=True, fname=self.output) - self.assertEqual(model_devi[0][0], 0) - self.assertEqual(model_devi[1][0], self.freq) + self.assertAlmostEqual(model_devi[0][0], 0) + self.assertAlmostEqual(model_devi[1][0], self.freq) for ii in range(1, 7): self.assertAlmostEqual(model_devi[0][ii], self.expect[ii]) - self.assertEqual(model_devi[0][ii], model_devi[1][ii]) + self.assertAlmostEqual(model_devi[0][ii], model_devi[1][ii]) self.assertTrue(os.path.isfile(self.output)) def tearDown(self): From 0a766eabdbd637f1c74323274fb3a70f6eb3b5aa Mon Sep 17 00:00:00 2001 From: Chenxing Luo Date: Tue, 7 Sep 2021 23:10:29 -0400 
Subject: [PATCH 55/55] Fix missing `std::numeric_limits` - Include the `<limits>` header, fixing missing `std::numeric_limits` when compiling with GCC 11.0.