diff --git a/README.md b/README.md index 178758f5db..5520daa57f 100644 --- a/README.md +++ b/README.md @@ -10,9 +10,9 @@ - [Deep Potential in a nutshell](#deep-potential-in-a-nutshell) - [Download and install](#download-and-install) - [Easy installation methods](#easy-installation-methods) + - [Offline packages](#offline-packages) - [With Docker](#with-docker) - [With conda](#with-conda) - - [Offline packages](#offline-packages) - [Install the python interaction](#install-the-python-interface) - [Install the Tensorflow's python interface](#install-the-tensorflows-python-interface) - [Install the DeePMD-kit's python interface](#install-the-deepmd-kits-python-interface) @@ -90,8 +90,10 @@ Please follow our [github](https://github.com/deepmodeling/deepmd-kit) webpage t ## Easy installation methods There various easy methods to install DeePMD-kit. Choose one that you prefer. If you want to build by yourself, jump to the next two sections. -### With Docker -A docker for installing the DeePMD-kit on CentOS 7 is available [here](https://github.com/frankhan91/deepmd-kit_docker). +After your easy installation, DeePMD-kit (`dp`) and LAMMPS (`lmp`) will be available to execute. You can try `dp -h` and `lmp -h` to see the help. `mpirun` is also available considering you may want to run LAMMPS in parallel. + +### Offline packages +Both CPU and GPU version offline packages are available in [the Releases page](https://github.com/deepmodeling/deepmd-kit/releases). ### With conda DeePMD-kit is avaiable with [conda](https://github.com/conda/conda). Install [Anaconda](https://www.anaconda.com/distribution/#download-section) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) first. 
@@ -101,13 +103,23 @@ To install the CPU version: conda install deepmd-kit=*=*cpu lammps-dp=*=*cpu -c deepmodeling ``` -To install the GPU version containing [CUDA 10.0](https://docs.nvidia.com/deploy/cuda-compatibility/index.html#binary-compatibility__table-toolkit-driver): +To install the GPU version containing [CUDA 10.1](https://docs.nvidia.com/deploy/cuda-compatibility/index.html#binary-compatibility__table-toolkit-driver): ```bash conda install deepmd-kit=*=*gpu lammps-dp=*=*gpu -c deepmodeling ``` -### Offline packages -Both CPU and GPU version offline package are avaiable in [the Releases page](https://github.com/deepmodeling/deepmd-kit/releases). +### With Docker +A docker for installing the DeePMD-kit is available [here](https://github.com/orgs/deepmodeling/packages/container/deepmd-kit). + +To pull the CPU version: +```bash +docker pull ghcr.io/deepmodeling/deepmd-kit:1.2.0_cpu +``` + +To pull the GPU version: +```bash +docker pull ghcr.io/deepmodeling/deepmd-kit:1.2.0_cuda10.1_gpu +``` ## Install the python interface ### Install the Tensorflow's python interface diff --git a/data/json/json2yaml.py b/data/json/json2yaml.py new file mode 100644 index 0000000000..f601928427 --- /dev/null +++ b/data/json/json2yaml.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +import argparse +import json +from pathlib import Path +from warnings import warn + +import yaml + + +def _main(): + parser = argparse.ArgumentParser( + description="convert json config file to yaml", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + # get all json files in dir + jsons = [p for p in Path.cwd().glob("*.json")] + # use the newest as autosuggestion + jsons.sort(key=lambda x: x.stat().st_mtime, reverse=True) + jfile = jsons[0] + yfile = jfile.with_suffix(".yaml") + + parser.add_argument("INPUT", default=jfile, type=Path, nargs="?", + help="input json file") + parser.add_argument("OUTPUT", default=yfile, type=Path, nargs="?", + help="output yaml file") + args = 
parser.parse_args() + + with args.INPUT.open("r") as infile, args.OUTPUT.open("w") as outfile: + yaml.dump(json.load(infile), outfile, default_flow_style=False, + sort_keys=False) + + warn("The order of the keys won't be preserved!", SyntaxWarning) + warn("_comment keys will also be lostt in the conversion") + +if __name__ == "__main__": + _main() diff --git a/setup.py b/setup.py index 99a86d3da8..117ccec2a2 100644 --- a/setup.py +++ b/setup.py @@ -4,18 +4,8 @@ from setuptools_scm import get_version from packaging.version import LegacyVersion from os import path, makedirs -import imp, sys, platform +import os, imp, sys, platform, sysconfig -def get_dp_install_path() : - site_packages_path = path.join(path.dirname(path.__file__), 'site-packages') - dp_scm_version = get_version(root="./", relative_to=__file__) - python_version = 'py' + str(sys.version_info.major + sys.version_info.minor * 0.1) - os_info = sys.platform - machine_info = platform.machine() - dp_pip_install_path = site_packages_path + '/deepmd' - dp_setup_install_path = site_packages_path + '/deepmd_kit-' + dp_scm_version + '-' + python_version + '-' + os_info + '-' + machine_info + '.egg/deepmd' - - return dp_pip_install_path, dp_setup_install_path readme_file = path.join(path.dirname(path.abspath(__file__)), 'README.md') try: @@ -31,7 +21,8 @@ def get_dp_install_path() : site_packages_path = path.join(path.dirname(path.__file__), 'site-packages') tf_install_dir = imp.find_module('tensorflow', [site_packages_path])[1] -install_requires=['numpy', 'scipy'] + +install_requires=['numpy', 'scipy', 'pyyaml'] setup_requires=['setuptools_scm', 'scikit-build', 'cmake'] # add cmake as a build requirement if cmake>3.0 is not installed @@ -46,7 +37,6 @@ def get_dp_install_path() : except OSError: pass -dp_pip_install_path, dp_setup_install_path = get_dp_install_path() setup( name="deepmd-kit", @@ -70,8 +60,6 @@ def get_dp_install_path() : '-DBUILD_PY_IF:BOOL=TRUE', '-DBUILD_CPP_IF:BOOL=FALSE', 
'-DFLOAT_PREC:STRING=high', - '-DDP_PIP_INSTALL_PATH=%s' % dp_pip_install_path, - '-DDP_SETUP_INSTALL_PATH=%s' % dp_setup_install_path, ], cmake_source_dir='source', cmake_minimum_required_version='3.0', diff --git a/source/op/CMakeLists.txt b/source/op/CMakeLists.txt index 993a1b6fd4..e73e5dcb49 100644 --- a/source/op/CMakeLists.txt +++ b/source/op/CMakeLists.txt @@ -25,7 +25,7 @@ endif (BUILD_CPP_IF) if (BUILD_PY_IF) set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) - set(CMAKE_INSTALL_RPATH DESTINATION ${DP_PIP_INSTALL_PATH} ${DP_SETUP_INSTALL_PATH} ${CMAKE_BINARY_DIR}/op/cuda) + set(CMAKE_INSTALL_RPATH $ORIGIN) if (USE_CUDA_TOOLKIT) add_library(op_abi SHARED ${OP_PY_CUDA_SRC} ${OP_LIB}) add_library(op_grads SHARED ${OP_GRADS_SRC}) diff --git a/source/op/cuda/descrpt_se_a.cu b/source/op/cuda/descrpt_se_a.cu index 1636df8ff5..09b2ed2638 100644 --- a/source/op/cuda/descrpt_se_a.cu +++ b/source/op/cuda/descrpt_se_a.cu @@ -77,19 +77,20 @@ __device__ inline T dev_dot(T * arr1, T * arr2) { return arr1[0] * arr2[0] + arr1[1] * arr2[1] + arr1[2] * arr2[2]; } -__device__ inline void spline5_switch(compute_t & vv, - compute_t & dd, - compute_t & xx, - const compute_t & rmin, - const compute_t & rmax) +template +__device__ inline void spline5_switch(FPTYPE & vv, + FPTYPE & dd, + FPTYPE & xx, + const float & rmin, + const float & rmax) { if (xx < rmin) { dd = 0; vv = 1; } else if (xx < rmax) { - compute_t uu = (xx - rmin) / (rmax - rmin) ; - compute_t du = 1. / (rmax - rmin) ; + FPTYPE uu = (xx - rmin) / (rmax - rmin) ; + FPTYPE du = 1. / (rmax - rmin) ; vv = uu*uu*uu * (-6 * uu*uu + 15 * uu - 10) + 1; dd = ( 3 * uu*uu * (-6 * uu*uu + 15 * uu - 10) + uu*uu*uu * (-12 * uu + 15) ) * du; } @@ -180,20 +181,21 @@ __global__ void format_nlist_fill_b_se_a(int * nlist, } //it's ok! 
-__global__ void compute_descriptor_se_a (VALUETYPE* descript, +template +__global__ void compute_descriptor_se_a (FPTYPE* descript, const int ndescrpt, - VALUETYPE* descript_deriv, + FPTYPE* descript_deriv, const int descript_deriv_size, - VALUETYPE* rij, + FPTYPE* rij, const int rij_size, const int* type, - const VALUETYPE* avg, - const VALUETYPE* std, + const FPTYPE* avg, + const FPTYPE* std, int* nlist, const int nlist_size, - const VALUETYPE* coord, - const VALUETYPE rmin, - const VALUETYPE rmax, + const FPTYPE* coord, + const float rmin, + const float rmax, const int sec_a_size) { // <<>> @@ -204,9 +206,9 @@ __global__ void compute_descriptor_se_a (VALUETYPE* descript, if (idy >= sec_a_size) {return;} // else {return;} - VALUETYPE * row_descript = descript + idx * ndescrpt; - VALUETYPE * row_descript_deriv = descript_deriv + idx * descript_deriv_size; - VALUETYPE * row_rij = rij + idx * rij_size; + FPTYPE * row_descript = descript + idx * ndescrpt; + FPTYPE * row_descript_deriv = descript_deriv + idx * descript_deriv_size; + FPTYPE * row_rij = rij + idx * rij_size; int * row_nlist = nlist + idx * nlist_size; if (row_nlist[idy] >= 0) { @@ -214,14 +216,14 @@ __global__ void compute_descriptor_se_a (VALUETYPE* descript, for (int kk = 0; kk < 3; kk++) { row_rij[idy * 3 + kk] = coord[j_idx * 3 + kk] - coord[idx * 3 + kk]; } - const compute_t * rr = &row_rij[idy * 3 + 0]; - compute_t nr2 = dev_dot(rr, rr); - compute_t inr = 1./sqrt(nr2); - compute_t nr = nr2 * inr; - compute_t inr2 = inr * inr; - compute_t inr4 = inr2 * inr2; - compute_t inr3 = inr4 * nr; - compute_t sw, dsw; + const FPTYPE * rr = &row_rij[idy * 3 + 0]; + FPTYPE nr2 = dev_dot(rr, rr); + FPTYPE inr = 1./sqrt(nr2); + FPTYPE nr = nr2 * inr; + FPTYPE inr2 = inr * inr; + FPTYPE inr4 = inr2 * inr2; + FPTYPE inr3 = inr4 * nr; + FPTYPE sw, dsw; spline5_switch(sw, dsw, nr, rmin, rmax); row_descript[idx_value + 0] = (1./nr) ;//* sw; row_descript[idx_value + 1] = (rr[0] / nr2) ;//* sw; diff --git 
a/source/tests/test_compat_input.py b/source/tests/test_compat_input.py new file mode 100644 index 0000000000..c0a29283dd --- /dev/null +++ b/source/tests/test_compat_input.py @@ -0,0 +1,32 @@ +import os,sys +import numpy as np +import unittest + +from deepmd.compat import convert_input_v0_v1 +from deepmd.common import j_loader + +class TestConvertInput (unittest.TestCase) : + def test_convert_smth(self): + jdata0 = j_loader(os.path.join('compat_inputs', 'water_se_a_v0.json')) + jdata1 = j_loader(os.path.join('compat_inputs', 'water_se_a_v1.json')) + jdata = convert_input_v0_v1(jdata0, warning = False, dump = None) + self.assertEqual(jdata, jdata1) + + def test_convert_nonsmth(self): + jdata0 = j_loader(os.path.join('compat_inputs', 'water_v0.json')) + jdata1 = j_loader(os.path.join('compat_inputs', 'water_v1.json')) + jdata = convert_input_v0_v1(jdata0, warning = False, dump = None) + self.assertEqual(jdata, jdata1) + + def test_json_yaml_equal(self): + + inputs = ("water_v1", "water_se_a_v1") + + for i in inputs: + jdata = j_loader(os.path.join('yaml_inputs', f'{i}.json')) + ydata = j_loader(os.path.join('yaml_inputs', f'{i}.yaml')) + self.assertEqual(jdata, ydata) + + with self.assertRaises(TypeError): + j_loader("path_with_wrong.extension") + diff --git a/source/tests/test_compat_input_v0_v1.py b/source/tests/test_compat_input_v0_v1.py deleted file mode 100644 index 6890a89301..0000000000 --- a/source/tests/test_compat_input_v0_v1.py +++ /dev/null @@ -1,23 +0,0 @@ -import os,sys,json -import numpy as np -import unittest - -from deepmd.compat import convert_input_v0_v1 - -class TestConvertInputV0V1 (unittest.TestCase) : - def test_convert_smth(self): - with open(os.path.join('compat_inputs', 'water_se_a_v0.json')) as fp: - jdata0 = json.load(fp) - with open(os.path.join('compat_inputs', 'water_se_a_v1.json')) as fp: - jdata1 = json.load(fp) - jdata = convert_input_v0_v1(jdata0, warning = False, dump = None) - self.assertEqual(jdata, jdata1) - - def 
test_convert_nonsmth(self): - with open(os.path.join('compat_inputs', 'water_v0.json')) as fp: - jdata0 = json.load(fp) - with open(os.path.join('compat_inputs', 'water_v1.json')) as fp: - jdata1 = json.load(fp) - jdata = convert_input_v0_v1(jdata0, warning = False, dump = None) - self.assertEqual(jdata, jdata1) - diff --git a/source/tests/test_data_modifier.py b/source/tests/test_data_modifier.py index 4e7b43663e..f71e31377d 100644 --- a/source/tests/test_data_modifier.py +++ b/source/tests/test_data_modifier.py @@ -1,9 +1,9 @@ -import os,sys,platform,json +import os,sys,platform import numpy as np import unittest from deepmd.env import tf -from deepmd.common import j_must_have, data_requirement +from deepmd.common import j_must_have, data_requirement, j_loader from deepmd.RunOptions import RunOptions from deepmd.Trainer import NNPTrainer from deepmd.DataSystem import DeepmdDataSystem @@ -45,8 +45,7 @@ def tearDown(self): def _setUp(self): args = Args() run_opt = RunOptions(args, False) - with open (args.INPUT, 'r') as fp: - jdata = json.load (fp) + jdata = j_loader(args.INPUT) # init model model = NNPTrainer (jdata, run_opt = run_opt) diff --git a/source/tests/test_fitting_stat.py b/source/tests/test_fitting_stat.py index 0cbd693ae1..1e9c48b30b 100644 --- a/source/tests/test_fitting_stat.py +++ b/source/tests/test_fitting_stat.py @@ -1,10 +1,11 @@ -import os,sys,json +import os,sys import numpy as np import unittest from collections import defaultdict from deepmd.DescrptSeA import DescrptSeA from deepmd.Fitting import EnerFitting +from deepmd.common import j_loader input_json = 'water_se_a_afparam.json' @@ -57,9 +58,8 @@ def _brute_aparam(data, ndim): class TestEnerFittingStat (unittest.TestCase) : def test (self) : - with open(input_json) as fp: - jdata = json.load(fp) - jdata = jdata['model'] + jdata = j_loader(input_json) + jdata = jdata['model'] descrpt = DescrptSeA(jdata['descriptor']) fitting = EnerFitting(jdata['fitting_net'], descrpt) avgs = [0, 10] diff 
--git a/source/tests/test_model_loc_frame.py b/source/tests/test_model_loc_frame.py index b651862885..e79e59de1a 100644 --- a/source/tests/test_model_loc_frame.py +++ b/source/tests/test_model_loc_frame.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptLocFrame import DescrptLocFrame from deepmd.Fitting import EnerFitting from deepmd.Model import Model -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -21,8 +21,7 @@ def setUp(self) : def test_model(self): jfile = 'water.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index 0d54f14c5f..2d32d89e45 100644 --- a/source/tests/test_model_se_a.py +++ b/source/tests/test_model_se_a.py @@ -1,5 +1,5 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -9,7 +9,7 @@ from deepmd.DescrptSeA import DescrptSeA from deepmd.Fitting import EnerFitting from deepmd.Model import Model -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -21,8 +21,8 @@ def setUp(self) : def test_model(self): jfile = 'water_se_a.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) + run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git 
a/source/tests/test_model_se_a_aparam.py b/source/tests/test_model_se_a_aparam.py index 58b060225c..f22629ca19 100644 --- a/source/tests/test_model_se_a_aparam.py +++ b/source/tests/test_model_se_a_aparam.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptSeA import DescrptSeA from deepmd.Fitting import EnerFitting from deepmd.Model import Model -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -20,8 +20,7 @@ def setUp(self) : def test_model(self): jfile = 'water_se_a_aparam.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/tests/test_model_se_a_fparam.py b/source/tests/test_model_se_a_fparam.py index ec4a46c7d4..7c5ca2dfc6 100644 --- a/source/tests/test_model_se_a_fparam.py +++ b/source/tests/test_model_se_a_fparam.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptSeA import DescrptSeA from deepmd.Fitting import EnerFitting from deepmd.Model import Model -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -20,8 +20,8 @@ def setUp(self) : def test_model(self): jfile = 'water_se_a_fparam.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) + run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git 
a/source/tests/test_model_se_a_srtab.py b/source/tests/test_model_se_a_srtab.py index c2950fe788..2eeda45b50 100644 --- a/source/tests/test_model_se_a_srtab.py +++ b/source/tests/test_model_se_a_srtab.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptSeA import DescrptSeA from deepmd.Fitting import EnerFitting from deepmd.Model import Model -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -30,8 +30,8 @@ def setUp(self) : def test_model(self): jfile = 'water_se_a.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) + run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/tests/test_model_se_r.py b/source/tests/test_model_se_r.py index d3607a9164..32e3276760 100644 --- a/source/tests/test_model_se_r.py +++ b/source/tests/test_model_se_r.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptSeR import DescrptSeR from deepmd.Fitting import EnerFitting from deepmd.Model import Model -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -20,8 +20,8 @@ def setUp(self) : def test_model(self): jfile = 'water_se_r.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) + run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/tests/test_polar_se_a.py 
b/source/tests/test_polar_se_a.py index 275b4fa707..ad2168dcb5 100644 --- a/source/tests/test_polar_se_a.py +++ b/source/tests/test_polar_se_a.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptSeA import DescrptSeA from deepmd.Fitting import PolarFittingSeA from deepmd.Model import PolarModel -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -20,8 +20,8 @@ def setUp(self) : def test_model(self): jfile = 'polar_se_a.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) + run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/tests/test_wfc.py b/source/tests/test_wfc.py index d4b408cd60..876f4dba0a 100644 --- a/source/tests/test_wfc.py +++ b/source/tests/test_wfc.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json,unittest +import dpdata,os,sys,unittest import numpy as np from deepmd.env import tf from common import Data,gen_data @@ -8,7 +8,7 @@ from deepmd.DescrptLocFrame import DescrptLocFrame from deepmd.Fitting import WFCFitting from deepmd.Model import WFCModel -from deepmd.common import j_must_have, j_must_have_d, j_have +from deepmd.common import j_must_have, j_must_have_d, j_have, j_loader global_ener_float_precision = tf.float64 global_tf_float_precision = tf.float64 @@ -20,8 +20,8 @@ def setUp(self) : def test_model(self): jfile = 'wfc.json' - with open(jfile) as fp: - jdata = json.load (fp) + jdata = j_loader(jfile) + run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/tests/yaml_inputs/water_se_a_v1.json b/source/tests/yaml_inputs/water_se_a_v1.json new file mode 100644 
index 0000000000..402da962ca --- /dev/null +++ b/source/tests/yaml_inputs/water_se_a_v1.json @@ -0,0 +1,55 @@ +{ + "model": { + "descriptor" :{ + "type": "se_a", + "sel": [46, 92], + "rcut_smth": 5.80, + "rcut": 6.00, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "resnet_dt": false, + "seed": 1 + }, + "fitting_net" : { + "neuron": [240, 240, 240], + "resnet_dt": true, + "seed": 1 + } + }, + + "learning_rate" :{ + "type": "exp", + "decay_steps": 5000, + "decay_rate": 0.95, + "start_lr": 0.001 + }, + + "loss" :{ + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + + "training" : { + "systems": ["../data/"], + "set_prefix": "set", + "stop_batch": 1000000, + "batch_size": [1], + + "seed": 1, + + "disp_file": "lcurve.out", + "disp_freq": 100, + "numb_test": 10, + "save_freq": 1000, + "save_ckpt": "model.ckpt", + "disp_training":true, + "time_training":true, + "profiling": true, + "profiling_file":"timeline.json" + } +} + diff --git a/source/tests/yaml_inputs/water_se_a_v1.yaml b/source/tests/yaml_inputs/water_se_a_v1.yaml new file mode 100644 index 0000000000..55580daf1e --- /dev/null +++ b/source/tests/yaml_inputs/water_se_a_v1.yaml @@ -0,0 +1,50 @@ +model: + descriptor: + type: se_a + sel: + - 46 + - 92 + rcut_smth: 5.8 + rcut: 6.0 + neuron: + - 25 + - 50 + - 100 + axis_neuron: 16 + resnet_dt: false + seed: 1 + fitting_net: + neuron: + - 240 + - 240 + - 240 + resnet_dt: true + seed: 1 +learning_rate: + type: exp + decay_steps: 5000 + decay_rate: 0.95 + start_lr: 0.001 +loss: + start_pref_e: 0.02 + limit_pref_e: 1 + start_pref_f: 1000 + limit_pref_f: 1 + start_pref_v: 0 + limit_pref_v: 0 +training: + systems: ['../data/'] + set_prefix: set + stop_batch: 1000000 + batch_size: + - 1 + seed: 1 + disp_file: lcurve.out + disp_freq: 100 + numb_test: 10 + save_freq: 1000 + save_ckpt: model.ckpt + disp_training: true + time_training: true + profiling: true + profiling_file: timeline.json 
diff --git a/source/tests/yaml_inputs/water_v1.json b/source/tests/yaml_inputs/water_v1.json new file mode 100644 index 0000000000..e5f2032ea2 --- /dev/null +++ b/source/tests/yaml_inputs/water_v1.json @@ -0,0 +1,51 @@ +{ + "with_distrib": false, + "model":{ + "descriptor": { + "type": "loc_frame", + "sel_a": [16, 32], + "sel_r": [30, 60], + "rcut": 6.00, + "axis_rule": [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0] + }, + "fitting_net": { + "neuron": [240, 120, 60, 30, 10], + "resnet_dt": true, + "seed": 1 + } + }, + + "learning_rate" :{ + "type": "exp", + "decay_steps": 5000, + "decay_rate": 0.95, + "start_lr": 0.001 + }, + + "loss" : { + "start_pref_e": 0.02, + "limit_pref_e": 8, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0 + }, + + "training": { + "systems": ["../data/"], + "set_prefix": "set", + "stop_batch": 1000000, + "batch_size": [4], + + "seed": 1, + + "disp_file": "lcurve.out", + "disp_freq": 100, + "numb_test": 10, + "save_freq": 1000, + "save_ckpt": "model.ckpt", + "disp_training":true, + "time_training":true + } +} + diff --git a/source/tests/yaml_inputs/water_v1.yaml b/source/tests/yaml_inputs/water_v1.yaml new file mode 100644 index 0000000000..5121a961b0 --- /dev/null +++ b/source/tests/yaml_inputs/water_v1.yaml @@ -0,0 +1,48 @@ +with_distrib: false +model: + descriptor: + type: loc_frame + sel_a: + - 16 + - 32 + sel_r: + - 30 + - 60 + rcut: 6.0 + axis_rule: [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0] + fitting_net: + neuron: + - 240 + - 120 + - 60 + - 30 + - 10 + resnet_dt: true + seed: 1 +learning_rate: + type: exp + decay_steps: 5000 + decay_rate: 0.95 + start_lr: 0.001 +loss: + start_pref_e: 0.02 + limit_pref_e: 8 + start_pref_f: 1000 + limit_pref_f: 1 + start_pref_v: 0 + limit_pref_v: 0 +training: + systems: + - ../data/ + set_prefix: set + stop_batch: 1000000 + batch_size: + - 4 + seed: 1 + disp_file: lcurve.out + disp_freq: 100 + numb_test: 10 + save_freq: 1000 + save_ckpt: model.ckpt + disp_training: true + 
time_training: true diff --git a/source/train/Loss.py b/source/train/Loss.py index d939273f26..1f336325a3 100644 --- a/source/train/Loss.py +++ b/source/train/Loss.py @@ -301,11 +301,12 @@ def build (self, polar_hat = label_dict[self.label_name] polar = model_dict[self.tensor_name] l2_loss = tf.reduce_mean( tf.square(self.scale*(polar - polar_hat)), name='l2_'+suffix) + more_loss = {'nonorm': l2_loss} if not self.atomic : atom_norm = 1./ global_cvt_2_tf_float(natoms[0]) l2_loss = l2_loss * atom_norm self.l2_l = l2_loss - more_loss = {} + self.l2_more = more_loss['nonorm'] return l2_loss, more_loss @@ -321,10 +322,10 @@ def print_on_training(self, feed_dict_test, feed_dict_batch) : error_test\ - = sess.run([self.l2_l], \ + = sess.run([self.l2_more], \ feed_dict=feed_dict_test) error_train\ - = sess.run([self.l2_l], \ + = sess.run([self.l2_more], \ feed_dict=feed_dict_batch) print_str = "" prop_fmt = " %9.2e %9.2e" diff --git a/source/train/common.py b/source/train/common.py index 887669a278..83f5e6ecf3 100644 --- a/source/train/common.py +++ b/source/train/common.py @@ -4,6 +4,8 @@ from deepmd.env import tf from deepmd.env import op_module from deepmd.RunOptions import global_tf_float_precision +import json +import yaml # def gelu(x): # """Gaussian Error Linear Unit. 
@@ -163,7 +165,18 @@ def j_must_have_d (jdata, key, deprecated_key) : def j_have (jdata, key) : return key in jdata.keys() - + +def j_loader(filename): + + if filename.endswith("json"): + with open(filename, 'r') as fp: + return json.load(fp) + elif filename.endswith(("yml", "yaml")): + with open(filename, 'r') as fp: + return yaml.safe_load(fp) + else: + raise TypeError("config file must be json, or yaml/yml") + def get_activation_func(activation_fn): if activation_fn not in activation_fn_dict: raise RuntimeError(activation_fn+" is not a valid activation function") diff --git a/source/train/print_old_model.py b/source/train/print_old_model.py index 14719723f9..d125e7f8b6 100644 --- a/source/train/print_old_model.py +++ b/source/train/print_old_model.py @@ -1,4 +1,4 @@ -import dpdata,os,sys,json +import dpdata,os,sys import numpy as np import tensorflow as tf from common import Data @@ -12,7 +12,7 @@ from deepmd.DataSystem import DataSystem from deepmd.Model import NNPModel from deepmd.Model import LearingRate -from deepmd.common import j_must_have +from deepmd.common import j_must_have, j_loader def gen_data() : tmpdata = Data(rand_pert = 0.1, seed = 1) @@ -32,8 +32,7 @@ def gen_data() : np.save('system/set.000/fparam.npy', tmpdata.fparam) def compute_efv(jfile): - fp = open (jfile, 'r') - jdata = json.load (fp) + jdata = j_loader(jfile) run_opt = RunOptions(None) systems = j_must_have(jdata, 'systems') set_pfx = j_must_have(jdata, 'set_prefix') diff --git a/source/train/test.py b/source/train/test.py index d8639020ce..e8953d1872 100644 --- a/source/train/test.py +++ b/source/train/test.py @@ -12,6 +12,7 @@ from deepmd import DeepPot from deepmd import DeepDipole from deepmd import DeepPolar +from deepmd import DeepGlobalPolar from deepmd import DeepWFC from tensorflow.python.framework import ops @@ -28,6 +29,8 @@ def test (args): dp = DeepDipole(args.model) elif de.model_type == 'polar': dp = DeepPolar(args.model) + elif de.model_type == 'global_polar': + dp = 
DeepGlobalPolar(args.model) elif de.model_type == 'wfc': dp = DeepWFC(args.model) else : @@ -41,7 +44,9 @@ def test (args): elif de.model_type == 'dipole': err, siz = test_dipole(dp, args) elif de.model_type == 'polar': - err, siz = test_polar(dp, args) + err, siz = test_polar(dp, args, global_polar=False) + elif de.model_type == 'global_polar': + err, siz = test_polar(dp, args, global_polar=True) elif de.model_type == 'wfc': err, siz = test_wfc(dp, args) else : @@ -50,7 +55,7 @@ def test (args): err_coll.append(err) siz_coll.append(siz) avg_err = weighted_average(err_coll, siz_coll) - if len(all_sys) != len(err): + if len(all_sys) != len(err_coll): print('Not all systems are tested! Check if the systems are valid') if len(all_sys) > 1: print ("# ----------weighted average of errors----------- ") @@ -61,6 +66,8 @@ def test (args): print_dipole_sys_avg(avg_err) elif de.model_type == 'polar': print_polar_sys_avg(avg_err) + elif de.model_type == 'global_polar': + print_polar_sys_avg(avg_err) elif de.model_type == 'wfc': print_wfc_sys_avg(avg_err) else : @@ -223,12 +230,15 @@ def print_wfc_sys_avg(avg): print ("WFC L2err : %e eV/A" % avg[0]) -def test_polar (dp, args) : +def test_polar (dp, args, global_polar = False) : if args.rand_seed is not None : np.random.seed(args.rand_seed % (2**32)) data = DeepmdData(args.system, args.set_prefix, shuffle_test = args.shuffle_test) - data.add('polarizability', 9, atomic=True, must=True, high_prec=False, type_sel = dp.get_sel_type()) + if not global_polar: + data.add('polarizability', 9, atomic=True, must=True, high_prec=False, type_sel = dp.get_sel_type()) + else: + data.add('polarizability', 9, atomic=False, must=True, high_prec=False, type_sel = dp.get_sel_type()) test_data = data.get_test () numb_test = args.numb_test natoms = len(test_data["type"][0]) @@ -239,12 +249,21 @@ def test_polar (dp, args) : box = test_data["box"][:numb_test] atype = test_data["type"][0] polar = dp.eval(coord, box, atype) + sel_type = 
dp.get_sel_type() + sel_natoms = 0 + for ii in sel_type: + sel_natoms += sum(atype == ii) polar = polar.reshape([numb_test,-1]) l2f = (l2err (polar - test_data["polarizability"] [:numb_test])) + l2fs = l2f/np.sqrt(sel_natoms) + l2fa = l2f/sel_natoms print ("# number of test data : %d " % numb_test) - print ("Polarizability L2err : %e eV/A" % l2f) + print ("Polarizability L2err : %e eV/A" % l2f) + if global_polar: + print ("Polarizability L2err/sqrtN : %e eV/A" % l2fs) + print ("Polarizability L2err/N : %e eV/A" % l2fa) detail_file = args.detail_file if detail_file is not None : diff --git a/source/train/train.py b/source/train/train.py index c89760fa4d..3e7ba2955b 100755 --- a/source/train/train.py +++ b/source/train/train.py @@ -4,13 +4,12 @@ import sys import time import numpy as np -import json from deepmd.env import tf from deepmd.compat import convert_input_v0_v1 from deepmd.RunOptions import RunOptions from deepmd.DataSystem import DeepmdDataSystem from deepmd.Trainer import NNPTrainer -from deepmd.common import data_requirement, expand_sys_str +from deepmd.common import data_requirement, expand_sys_str, j_loader from deepmd.DataModifier import DipoleChargeModifier def create_done_queue(cluster_spec, task_index): @@ -49,8 +48,8 @@ def j_must_have (jdata, key) : def train (args) : # load json database - with open (args.INPUT, 'r') as fp: - jdata = json.load (fp) + jdata = j_loader(args.INPUT) + if not 'model' in jdata.keys(): jdata = convert_input_v0_v1(jdata, warning = True,