diff --git a/deepmd/common.py b/deepmd/common.py index 25ed0b1eab..a1628644c7 100644 --- a/deepmd/common.py +++ b/deepmd/common.py @@ -212,141 +212,6 @@ def make_default_mesh( return default_mesh -# TODO not an ideal approach, every class uses this to parse arguments on its own, json -# TODO should be parsed once and the parsed result passed to all objects that need it -class ClassArg: - """Class that take care of input json/yaml parsing. - - The rules for parsing are defined by the `add` method, than `parse` is called to - process the supplied dict - - Attributes - ---------- - arg_dict: Dict[str, Any] - dictionary containing parsing rules - alias_map: Dict[str, Any] - dictionary with keyword aliases - """ - - def __init__(self) -> None: - self.arg_dict = {} - self.alias_map = {} - - def add( - self, - key: str, - types_: Union[type, List[type]], - alias: Optional[Union[str, List[str]]] = None, - default: Optional[Any] = None, - must: bool = False, - ) -> "ClassArg": - """Add key to be parsed. 
- - Parameters - ---------- - key : str - key name - types_ : Union[type, List[type]] - list of allowed key types - alias : Optional[Union[str, List[str]]], optional - alias for the key, by default None - default : Any, optional - default value for the key, by default None - must : bool, optional - if the key is mandatory, by default False - - Returns - ------- - ClassArg - instance with added key - """ - if not isinstance(types_, list): - types = [types_] - else: - types = types_ - if alias is not None: - if not isinstance(alias, list): - alias_ = [alias] - else: - alias_ = alias - else: - alias_ = [] - - self.arg_dict[key] = { - "types": types, - "alias": alias_, - "value": default, - "must": must, - } - for ii in alias_: - self.alias_map[ii] = key - - return self - - def _add_single(self, key: str, data: Any): - vtype = type(data) - if data is None: - return data - if not (vtype in self.arg_dict[key]["types"]): - for tp in self.arg_dict[key]["types"]: - try: - vv = tp(data) - except TypeError: - pass - else: - break - else: - raise TypeError( - f"cannot convert provided key {key} to type(s) " - f'{self.arg_dict[key]["types"]} ' - ) - else: - vv = data - self.arg_dict[key]["value"] = vv - - def _check_must(self): - for kk in self.arg_dict: - if self.arg_dict[kk]["must"] and self.arg_dict[kk]["value"] is None: - raise RuntimeError(f"key {kk} must be provided") - - def parse(self, jdata: Dict[str, Any]) -> Dict[str, Any]: - """Parse input dictionary, use the rules defined by add method. - - Parameters - ---------- - jdata : Dict[str, Any] - loaded json/yaml data - - Returns - ------- - Dict[str, Any] - parsed dictionary - """ - for kk in jdata.keys(): - if kk in self.arg_dict: - key = kk - self._add_single(key, jdata[kk]) - else: - if kk in self.alias_map: - key = self.alias_map[kk] - self._add_single(key, jdata[kk]) - self._check_must() - return self.get_dict() - - def get_dict(self) -> Dict[str, Any]: - """Get dictionary built from rules defined by add method. 
- - Returns - ------- - Dict[str, Any] - settings dictionary with default values - """ - ret = {} - for kk in self.arg_dict.keys(): - ret[kk] = self.arg_dict[kk]["value"] - return ret - - # TODO maybe rename this to j_deprecated and only warn about deprecated keys, # TODO if the deprecated_key argument is left empty function puppose is only custom # TODO error since dict[key] already raises KeyError when the key is missing diff --git a/deepmd/descriptor/hybrid.py b/deepmd/descriptor/hybrid.py index d41d826294..9ccda8c9a6 100644 --- a/deepmd/descriptor/hybrid.py +++ b/deepmd/descriptor/hybrid.py @@ -2,7 +2,6 @@ from typing import Tuple, List from deepmd.env import tf -from deepmd.common import ClassArg from deepmd.env import op_module from deepmd.env import GLOBAL_TF_FLOAT_PRECISION from deepmd.env import GLOBAL_NP_FLOAT_PRECISION @@ -51,10 +50,6 @@ def __init__ (self, formatted_descript_list.append(Descriptor(**ii)) else: raise NotImplementedError - # args = ClassArg()\ - # .add('list', list, must = True) - # class_data = args.parse(jdata) - # dict_list = class_data['list'] self.descrpt_list = formatted_descript_list self.numb_descrpt = len(self.descrpt_list) for ii in range(1, self.numb_descrpt): diff --git a/deepmd/descriptor/loc_frame.py b/deepmd/descriptor/loc_frame.py index 4aedf8b247..d96b38385c 100644 --- a/deepmd/descriptor/loc_frame.py +++ b/deepmd/descriptor/loc_frame.py @@ -46,12 +46,6 @@ def __init__(self, """ Constructor """ - # args = ClassArg()\ - # .add('sel_a', list, must = True) \ - # .add('sel_r', list, must = True) \ - # .add('rcut', float, default = 6.0) \ - # .add('axis_rule',list, must = True) - # class_data = args.parse(jdata) self.sel_a = sel_a self.sel_r = sel_r self.axis_rule = axis_rule diff --git a/deepmd/descriptor/se_a_ebd.py b/deepmd/descriptor/se_a_ebd.py index 276a716482..66034078ea 100644 --- a/deepmd/descriptor/se_a_ebd.py +++ b/deepmd/descriptor/se_a_ebd.py @@ -2,7 +2,7 @@ from typing import Optional, Tuple, List from deepmd.env 
import tf -from deepmd.common import ClassArg, get_activation_func, get_precision, add_data_requirement +from deepmd.common import get_activation_func, get_precision, add_data_requirement from deepmd.utils.network import one_layer from deepmd.env import GLOBAL_TF_FLOAT_PRECISION from deepmd.env import GLOBAL_NP_FLOAT_PRECISION @@ -75,12 +75,6 @@ def __init__ (self, """ Constructor """ - # args = ClassArg()\ - # .add('type_nchanl', int, default = 4) \ - # .add('type_nlayer', int, default = 2) \ - # .add('type_one_side', bool, default = True) \ - # .add('numb_aparam', int, default = 0) - # class_data = args.parse(jdata) DescrptSeA.__init__(self, rcut, rcut_smth, diff --git a/deepmd/descriptor/se_a_ef.py b/deepmd/descriptor/se_a_ef.py index 572a54ec85..c272ed7491 100644 --- a/deepmd/descriptor/se_a_ef.py +++ b/deepmd/descriptor/se_a_ef.py @@ -309,25 +309,6 @@ def __init__ (self, precision, uniform_seed ) - # DescrptSeA.__init__(self, **jdata) - # args = ClassArg()\ - # .add('sel', list, must = True) \ - # .add('rcut', float, default = 6.0) \ - # .add('rcut_smth',float, default = 5.5) \ - # .add('neuron', list, default = [10, 20, 40]) \ - # .add('axis_neuron', int, default = 4, alias = 'n_axis_neuron') \ - # .add('resnet_dt',bool, default = False) \ - # .add('trainable',bool, default = True) \ - # .add('seed', int) - # class_data = args.parse(jdata) - # self.sel_a = class_data['sel'] - # self.rcut_r = class_data['rcut'] - # self.rcut_r_smth = class_data['rcut_smth'] - # self.filter_neuron = class_data['neuron'] - # self.n_axis_neuron = class_data['axis_neuron'] - # self.filter_resnet_dt = class_data['resnet_dt'] - # self.seed = class_data['seed'] - # self.trainable = class_data['trainable'] self.sel_a = sel self.rcut_r = rcut self.rcut_r_smth = rcut_smth diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index 9bc0aa4bfc..43ea6d4954 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -70,20 +70,6 @@ def __init__ (self, """ 
Constructor """ - # args = ClassArg()\ - # .add('sel', list, must = True) \ - # .add('rcut', float, default = 6.0) \ - # .add('rcut_smth',float, default = 0.5) \ - # .add('neuron', list, default = [10, 20, 40]) \ - # .add('resnet_dt',bool, default = False) \ - # .add('trainable',bool, default = True) \ - # .add('seed', int) \ - # .add('type_one_side', bool, default = False) \ - # .add('exclude_types', list, default = []) \ - # .add('set_davg_zero', bool, default = False) \ - # .add("activation_function", str, default = "tanh") \ - # .add("precision", str, default = "default") - # class_data = args.parse(jdata) if rcut < rcut_smth: raise RuntimeError("rcut_smth (%f) should be no more than rcut (%f)!" % (rcut_smth, rcut)) self.sel_r = sel diff --git a/deepmd/fit/__init__.py b/deepmd/fit/__init__.py index 71d582e8ca..3f94c00a43 100644 --- a/deepmd/fit/__init__.py +++ b/deepmd/fit/__init__.py @@ -1,6 +1,4 @@ from .ener import EnerFitting -from .wfc import WFCFitting from .dipole import DipoleFittingSeA from .polar import PolarFittingSeA from .polar import GlobalPolarFittingSeA -from .polar import PolarFittingLocFrame diff --git a/deepmd/fit/dipole.py b/deepmd/fit/dipole.py index 683f7f6443..932c834577 100644 --- a/deepmd/fit/dipole.py +++ b/deepmd/fit/dipole.py @@ -51,14 +51,6 @@ def __init__ (self, """ self.ntypes = descrpt.get_ntypes() self.dim_descrpt = descrpt.get_dim_out() - # args = ClassArg()\ - # .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ - # .add('resnet_dt', bool, default = True)\ - # .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'dipole_type')\ - # .add('seed', int)\ - # .add("activation_function", str, default = "tanh")\ - # .add('precision', str, default = "default") - # class_data = args.parse(jdata) self.n_neuron = neuron self.resnet_dt = resnet_dt self.sel_type = sel_type diff --git a/deepmd/fit/polar.py b/deepmd/fit/polar.py index 6e9a18a6f0..2bd07b847b 100644 --- a/deepmd/fit/polar.py +++ 
b/deepmd/fit/polar.py @@ -14,93 +14,6 @@ from deepmd.env import GLOBAL_TF_FLOAT_PRECISION -class PolarFittingLocFrame () : - """ - Fitting polarizability with local frame descriptor. - - .. deprecated:: 2.0.0 - This class is not supported any more. - """ - def __init__ (self, jdata, descrpt) : - if not isinstance(descrpt, DescrptLocFrame) : - raise RuntimeError('PolarFittingLocFrame only supports DescrptLocFrame') - self.ntypes = descrpt.get_ntypes() - self.dim_descrpt = descrpt.get_dim_out() - args = ClassArg()\ - .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ - .add('resnet_dt', bool, default = True)\ - .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\ - .add('seed', int)\ - .add("activation_function", str, default = "tanh")\ - .add('precision', str, default = "default") - class_data = args.parse(jdata) - self.n_neuron = class_data['neuron'] - self.resnet_dt = class_data['resnet_dt'] - self.sel_type = class_data['sel_type'] - self.seed = class_data['seed'] - self.fitting_activation_fn = get_activation_func(class_data["activation_function"]) - self.fitting_precision = get_precision(class_data['precision']) - self.useBN = False - - def get_sel_type(self): - return self.sel_type - - def get_out_size(self): - return 9 - - def build (self, - input_d, - rot_mat, - natoms, - reuse = None, - suffix = '') : - start_index = 0 - inputs = tf.cast(tf.reshape(input_d, [-1, natoms[0], self.dim_descrpt]), self.fitting_precision) - rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]]) - - count = 0 - outs_list = [] - for type_i in range(self.ntypes): - # cut-out inputs - inputs_i = tf.slice (inputs, - [ 0, start_index, 0], - [-1, natoms[2+type_i], -1] ) - inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) - rot_mat_i = tf.slice (rot_mat, - [ 0, start_index* 9], - [-1, natoms[2+type_i]* 9] ) - rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3]) - start_index += natoms[2+type_i] - if not type_i in self.sel_type : - 
continue - layer = inputs_i - for ii in range(0,len(self.n_neuron)) : - if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : - layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) - else : - layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) - # (nframes x natoms) x 9 - final_layer = one_layer(layer, 9, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, final_layer = True) - # (nframes x natoms) x 3 x 3 - final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 3, 3]) - # (nframes x natoms) x 3 x 3 - final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1]) - # (nframes x natoms) x 3 x 3(coord) - final_layer = tf.matmul(final_layer, rot_mat_i) - # (nframes x natoms) x 3(coord) x 3(coord) - final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a = True) - # nframes x natoms x 3 x 3 - final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3, 3]) - - # concat the results - outs_list.append(final_layer) - count += 1 - outs = tf.concat(outs_list, axis = 1) - - tf.summary.histogram('fitting_net_output', outs) - return tf.cast(tf.reshape(outs, [-1]), self.fitting_precision) - - class PolarFittingSeA (Fitting) : """ Fit the atomic polarizability with descriptor se_a @@ -150,17 +63,6 @@ def __init__ (self, """ self.ntypes = descrpt.get_ntypes() self.dim_descrpt = descrpt.get_dim_out() - # args = ClassArg()\ - # .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ - # .add('resnet_dt', bool, default = True)\ - # .add('fit_diag', bool, default = True)\ - # .add('diag_shift', 
[list,float], default = [0.0 for ii in range(self.ntypes)])\ - # .add('scale', [list,float], default = [1.0 for ii in range(self.ntypes)])\ - # .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'pol_type')\ - # .add('seed', int)\ - # .add("activation_function", str , default = "tanh")\ - # .add('precision', str, default = "default") - # class_data = args.parse(jdata) self.n_neuron = neuron self.resnet_dt = resnet_dt self.sel_type = sel_type diff --git a/deepmd/fit/wfc.py b/deepmd/fit/wfc.py deleted file mode 100644 index 4a07d804e2..0000000000 --- a/deepmd/fit/wfc.py +++ /dev/null @@ -1,106 +0,0 @@ -import warnings -import numpy as np -from typing import Tuple, List - -from deepmd.env import tf -from deepmd.common import ClassArg, add_data_requirement, get_activation_func, get_precision -from deepmd.utils.network import one_layer, one_layer_rand_seed_shift -from deepmd.descriptor import DescrptLocFrame - -from deepmd.env import global_cvt_2_tf_float -from deepmd.env import GLOBAL_TF_FLOAT_PRECISION - -class WFCFitting () : - """ - Fitting Wannier function centers (WFCs) with local frame descriptor. - - .. deprecated:: 2.0.0 - This class is not supported any more. 
- """ - def __init__ (self, jdata, descrpt): - if not isinstance(descrpt, DescrptLocFrame) : - raise RuntimeError('WFC only supports DescrptLocFrame') - self.ntypes = descrpt.get_ntypes() - self.dim_descrpt = descrpt.get_dim_out() - args = ClassArg()\ - .add('neuron', list, default = [120,120,120], alias = 'n_neuron')\ - .add('resnet_dt', bool, default = True)\ - .add('wfc_numb', int, must = True)\ - .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'wfc_type')\ - .add('seed', int)\ - .add("activation_function", str, default = "tanh")\ - .add('precision', str, default = "default")\ - .add('uniform_seed', bool, default = False) - class_data = args.parse(jdata) - self.n_neuron = class_data['neuron'] - self.resnet_dt = class_data['resnet_dt'] - self.wfc_numb = class_data['wfc_numb'] - self.sel_type = class_data['sel_type'] - self.seed = class_data['seed'] - self.uniform_seed = class_data['uniform_seed'] - self.seed_shift = one_layer_rand_seed_shift() - self.fitting_activation_fn = get_activation_func(class_data["activation_function"]) - self.fitting_precision = get_precision(class_data['precision']) - self.useBN = False - - - def get_sel_type(self): - return self.sel_type - - def get_wfc_numb(self): - return self.wfc_numb - - def get_out_size(self): - return self.wfc_numb * 3 - - def build (self, - input_d, - rot_mat, - natoms, - input_dict = None, - reuse = None, - suffix = '') : - start_index = 0 - inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision) - rot_mat = tf.reshape(rot_mat, [-1, 9 * natoms[0]]) - - count = 0 - for type_i in range(self.ntypes): - # cut-out inputs - inputs_i = tf.slice (inputs, - [ 0, start_index* self.dim_descrpt], - [-1, natoms[2+type_i]* self.dim_descrpt] ) - inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt]) - rot_mat_i = tf.slice (rot_mat, - [ 0, start_index* 9], - [-1, natoms[2+type_i]* 9] ) - rot_mat_i = tf.reshape(rot_mat_i, [-1, 3, 3]) - start_index += 
natoms[2+type_i] - if not type_i in self.sel_type : - continue - layer = inputs_i - for ii in range(0,len(self.n_neuron)) : - if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : - layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) - else : - layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) - if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift - # (nframes x natoms) x (nwfc x 3) - final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, uniform_seed = self.uniform_seed) - if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift - # (nframes x natoms) x nwfc(wc) x 3(coord_local) - final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.wfc_numb, 3]) - # (nframes x natoms) x nwfc(wc) x 3(coord) - final_layer = tf.matmul(final_layer, rot_mat_i) - # nframes x natoms x nwfc(wc) x 3(coord_local) - final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], self.wfc_numb, 3]) - - # concat the results - if count == 0: - outs = final_layer - else: - outs = tf.concat([outs, final_layer], axis = 1) - count += 1 - - tf.summary.histogram('fitting_net_output', outs) - return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION) diff --git a/deepmd/infer/ewald_recp.py b/deepmd/infer/ewald_recp.py index 227fe2a7e2..05293e91bd 100644 --- a/deepmd/infer/ewald_recp.py +++ b/deepmd/infer/ewald_recp.py @@ -2,7 +2,6 @@ from typing import Tuple, 
List from deepmd.env import tf -from deepmd.common import ClassArg from deepmd.env import GLOBAL_TF_FLOAT_PRECISION from deepmd.env import GLOBAL_NP_FLOAT_PRECISION from deepmd.env import GLOBAL_ENER_FLOAT_PRECISION diff --git a/deepmd/loss/ener.py b/deepmd/loss/ener.py index 34e2f84326..c48fa3d508 100644 --- a/deepmd/loss/ener.py +++ b/deepmd/loss/ener.py @@ -1,6 +1,6 @@ import numpy as np from deepmd.env import tf -from deepmd.common import ClassArg, add_data_requirement +from deepmd.common import add_data_requirement from deepmd.env import global_cvt_2_tf_float from deepmd.env import global_cvt_2_ener_float @@ -173,75 +173,7 @@ def eval(self, sess, feed_dict, natoms): results["rmse_v"] = np.sqrt(error_v) / natoms[0] if self.has_pf: results["rmse_pf"] = np.sqrt(error_pf) - return results - - def print_header(self): # depreciated - prop_fmt = ' %11s %11s' - print_str = '' - print_str += prop_fmt % ('rmse_tst', 'rmse_trn') - if self.has_e : - print_str += prop_fmt % ('rmse_e_tst', 'rmse_e_trn') - if self.has_ae : - print_str += prop_fmt % ('rmse_ae_tst', 'rmse_ae_trn') - if self.has_f : - print_str += prop_fmt % ('rmse_f_tst', 'rmse_f_trn') - if self.has_v : - print_str += prop_fmt % ('rmse_v_tst', 'rmse_v_trn') - if self.has_pf : - print_str += prop_fmt % ('rmse_pf_tst', 'rmse_pf_trn') - return print_str - - def print_on_training(self, - tb_writer, - cur_batch, - sess, - natoms, - feed_dict_test, - feed_dict_batch): # depreciated - - run_data = [ - self.l2_l, - self.l2_more['l2_ener_loss'], - self.l2_more['l2_force_loss'], - self.l2_more['l2_virial_loss'], - self.l2_more['l2_atom_ener_loss'], - self.l2_more['l2_pref_force_loss'] - ] - - # first train data - train_out = run_sess(sess, run_data, feed_dict=feed_dict_batch) - error_train, error_e_train, error_f_train, error_v_train, error_ae_train, error_pf_train = train_out - - # than test data, if tensorboard log writter is present, commpute summary - # and write tensorboard logs - if tb_writer: - summary_merged_op 
= tf.summary.merge([self.l2_loss_summary, self.l2_loss_ener_summary, self.l2_loss_force_summary, self.l2_loss_virial_summary]) - run_data.insert(0, summary_merged_op) - - test_out = run_sess(sess, run_data, feed_dict=feed_dict_test) - - if tb_writer: - summary = test_out.pop(0) - tb_writer.add_summary(summary, cur_batch) - - error_test, error_e_test, error_f_test, error_v_test, error_ae_test, error_pf_test = test_out - - - print_str = "" - prop_fmt = " %11.2e %11.2e" - print_str += prop_fmt % (np.sqrt(error_test), np.sqrt(error_train)) - if self.has_e : - print_str += prop_fmt % (np.sqrt(error_e_test) / natoms[0], np.sqrt(error_e_train) / natoms[0]) - if self.has_ae : - print_str += prop_fmt % (np.sqrt(error_ae_test), np.sqrt(error_ae_train)) - if self.has_f : - print_str += prop_fmt % (np.sqrt(error_f_test), np.sqrt(error_f_train)) - if self.has_v : - print_str += prop_fmt % (np.sqrt(error_v_test) / natoms[0], np.sqrt(error_v_train) / natoms[0]) - if self.has_pf: - print_str += prop_fmt % (np.sqrt(error_pf_test), np.sqrt(error_pf_train)) - - return print_str + return results class EnerDipoleLoss (Loss) : @@ -252,17 +184,11 @@ def __init__ (self, start_pref_ed : float = 1.0, limit_pref_ed : float = 1.0 ) -> None : - self.starter_learning_rate = kwarg['starter_learning_rate'] - args = ClassArg()\ - .add('start_pref_e', float, must = True, default = 0.1) \ - .add('limit_pref_e', float, must = True, default = 1.00)\ - .add('start_pref_ed', float, must = True, default = 1.00)\ - .add('limit_pref_ed', float, must = True, default = 1.00) - class_data = args.parse(jdata) - self.start_pref_e = class_data['start_pref_e'] - self.limit_pref_e = class_data['limit_pref_e'] - self.start_pref_ed = class_data['start_pref_ed'] - self.limit_pref_ed = class_data['limit_pref_ed'] + self.starter_learning_rate = starter_learning_rate + self.start_pref_e = start_pref_e + self.limit_pref_e = limit_pref_e + self.start_pref_ed = start_pref_ed + self.limit_pref_ed = limit_pref_ed # data 
required add_data_requirement('energy', 1, atomic=False, must=True, high_prec=True) add_data_requirement('energy_dipole', 3, atomic=False, must=True, high_prec=False) @@ -330,58 +256,3 @@ def eval(self, sess, feed_dict, natoms): 'rmse_ed': np.sqrt(error_ed) } return results - - @staticmethod - def print_header() : # depreciated - prop_fmt = ' %9s %9s' - print_str = '' - print_str += prop_fmt % ('l2_tst', 'l2_trn') - print_str += prop_fmt % ('l2_e_tst', 'l2_e_trn') - print_str += prop_fmt % ('l2_ed_tst', 'l2_ed_trn') - return print_str - - def print_on_training(self, - tb_writer, - cur_batch, - sess, - natoms, - feed_dict_test, - feed_dict_batch): # depreciated - - run_data = [ - self.l2_l, - self.l2_more['l2_ener_loss'], - self.l2_more['l2_ener_dipole_loss'] - ] - - # first train data - train_out = run_sess(sess, run_data, feed_dict=feed_dict_batch) - error_train, error_e_train, error_ed_train = train_out - - # than test data, if tensorboard log writter is present, commpute summary - # and write tensorboard logs - if tb_writer: - summary_merged_op = tf.summary.merge([ - self.l2_loss_summary, - self.l2_loss_ener_summary, - self.l2_ener_dipole_loss_summary - ]) - run_data.insert(0, summary_merged_op) - - test_out = run_sess(sess, run_data, feed_dict=feed_dict_test) - - if tb_writer: - summary = test_out.pop(0) - tb_writer.add_summary(summary, cur_batch) - - error_test, error_e_test, error_ed_test = test_out - - print_str = "" - prop_fmt = " %9.2e %9.2e" - print_str += prop_fmt % (np.sqrt(error_test), np.sqrt(error_train)) - print_str += prop_fmt % (np.sqrt(error_e_test) / natoms[0], np.sqrt(error_e_train) / natoms[0]) - print_str += prop_fmt % (np.sqrt(error_ed_test), np.sqrt(error_ed_train)) - return print_str - - - diff --git a/deepmd/loss/tensor.py b/deepmd/loss/tensor.py index 91cbbd1e07..e7cbde6ebd 100644 --- a/deepmd/loss/tensor.py +++ b/deepmd/loss/tensor.py @@ -1,6 +1,6 @@ import numpy as np from deepmd.env import tf -from deepmd.common import ClassArg, 
add_data_requirement +from deepmd.common import add_data_requirement from deepmd.env import global_cvt_2_tf_float from deepmd.env import global_cvt_2_ener_float @@ -134,64 +134,3 @@ def eval(self, sess, feed_dict, natoms): if self.global_weight > 0.0: results["rmse_gl"] = np.sqrt(error_gl) / atoms return results - - def print_header(self): # depreciated - prop_fmt = ' %11s %11s' - print_str = '' - print_str += prop_fmt % ('rmse_tst', 'rmse_trn') - if self.local_weight > 0.0: - print_str += prop_fmt % ('rmse_lc_tst', 'rmse_lc_trn') - if self.global_weight > 0.0: - print_str += prop_fmt % ('rmse_gl_tst', 'rmse_gl_trn') - return print_str - - def print_on_training(self, - tb_writer, - cur_batch, - sess, - natoms, - feed_dict_test, - feed_dict_batch) : # depreciated - - # YHT: added to calculate the atoms number - atoms = 0 - if self.type_sel is not None: - for w in self.type_sel: - atoms += natoms[2+w] - else: - atoms = natoms[0] - - run_data = [self.l2_l, self.l2_more['local_loss'], self.l2_more['global_loss']] - summary_list = [self.l2_loss_summary] - if self.local_weight > 0.0: - summary_list.append(self.l2_loss_local_summary) - if self.global_weight > 0.0: - summary_list.append(self.l2_loss_global_summary) - - # first train data - error_train = run_sess(sess, run_data, feed_dict=feed_dict_batch) - - # than test data, if tensorboard log writter is present, commpute summary - # and write tensorboard logs - if tb_writer: - #summary_merged_op = tf.summary.merge([self.l2_loss_summary]) - summary_merged_op = tf.summary.merge(summary_list) - run_data.insert(0, summary_merged_op) - - test_out = run_sess(sess, run_data, feed_dict=feed_dict_test) - - if tb_writer: - summary = test_out.pop(0) - tb_writer.add_summary(summary, cur_batch) - - error_test = test_out - - print_str = "" - prop_fmt = " %11.2e %11.2e" - print_str += prop_fmt % (np.sqrt(error_test[0]), np.sqrt(error_train[0])) - if self.local_weight > 0.0: - print_str += prop_fmt % (np.sqrt(error_test[1]), 
np.sqrt(error_train[1]) ) - if self.global_weight > 0.0: - print_str += prop_fmt % (np.sqrt(error_test[2])/atoms, np.sqrt(error_train[2])/atoms) - - return print_str diff --git a/deepmd/model/ener.py b/deepmd/model/ener.py index 7504e22906..170c715fad 100644 --- a/deepmd/model/ener.py +++ b/deepmd/model/ener.py @@ -5,7 +5,6 @@ from deepmd.utils.pair_tab import PairTab from deepmd.utils.graph import load_graph_def, get_tensor_by_name_from_graph from deepmd.utils.errors import GraphWithoutTensorError -from deepmd.common import ClassArg from deepmd.env import global_cvt_2_ener_float, MODEL_VERSION, GLOBAL_TF_FLOAT_PRECISION from deepmd.env import op_module from .model import Model diff --git a/deepmd/model/tensor.py b/deepmd/model/tensor.py index ccc23757e2..26c813cfbf 100644 --- a/deepmd/model/tensor.py +++ b/deepmd/model/tensor.py @@ -2,7 +2,6 @@ from typing import Tuple, List from deepmd.env import tf -from deepmd.common import ClassArg from deepmd.env import global_cvt_2_ener_float, MODEL_VERSION, GLOBAL_TF_FLOAT_PRECISION from deepmd.env import op_module from deepmd.utils.graph import load_graph_def diff --git a/deepmd/train/trainer.py b/deepmd/train/trainer.py index 197b9ad138..49bdd7b141 100644 --- a/deepmd/train/trainer.py +++ b/deepmd/train/trainer.py @@ -14,7 +14,7 @@ from deepmd.env import get_tf_session_config from deepmd.env import GLOBAL_TF_FLOAT_PRECISION from deepmd.env import GLOBAL_ENER_FLOAT_PRECISION -from deepmd.fit import EnerFitting, WFCFitting, PolarFittingLocFrame, PolarFittingSeA, GlobalPolarFittingSeA, DipoleFittingSeA +from deepmd.fit import EnerFitting, PolarFittingSeA, DipoleFittingSeA from deepmd.descriptor import Descriptor from deepmd.model import EnerModel, WFCModel, DipoleModel, PolarModel, GlobalPolarModel, MultiModel from deepmd.loss import EnerStdLoss, EnerDipoleLoss, TensorLoss @@ -34,7 +34,7 @@ # load grad of force module import deepmd.op -from deepmd.common import j_must_have, ClassArg, data_requirement, get_precision +from 
deepmd.common import j_must_have, data_requirement, get_precision log = logging.getLogger(__name__) @@ -95,13 +95,9 @@ def _init_param(self, jdata): def fitting_net_init(fitting_type_, descrpt_type_, params): if fitting_type_ == 'ener': return EnerFitting(**params) - # elif fitting_type == 'wfc': - # self.fitting = WFCFitting(fitting_param, self.descrpt) elif fitting_type_ == 'dipole': return DipoleFittingSeA(**params) elif fitting_type_ == 'polar': - # if descrpt_type == 'loc_frame': - # self.fitting = PolarFittingLocFrame(fitting_param, self.descrpt) return PolarFittingSeA(**params) # elif fitting_type_ == 'global_polar': # if descrpt_type_ == 'se_e2_a': diff --git a/deepmd/utils/__init__.py b/deepmd/utils/__init__.py index e81b474095..c49afa752f 100644 --- a/deepmd/utils/__init__.py +++ b/deepmd/utils/__init__.py @@ -1,10 +1,6 @@ # from .data import DeepmdData from .data_system import DeepmdDataSystem - -# out-of-dated -from .data import DataSets -from .data_system import DataSystem from .pair_tab import PairTab from .learning_rate import LearningRateExp from .plugin import Plugin, PluginVariant diff --git a/deepmd/utils/data.py b/deepmd/utils/data.py index be270209f6..9202872f76 100644 --- a/deepmd/utils/data.py +++ b/deepmd/utils/data.py @@ -565,268 +565,3 @@ def _check_pbc(self, sys_path: DPPath): def _check_mode(self, set_path: DPPath): return (set_path / 'real_atom_types.npy').is_file() - - -class DataSets (object): - """ - Outdated class for one data system. - - .. deprecated:: 2.0.0 - This class is not maintained any more. 
- """ - def __init__ (self, - sys_path, - set_prefix, - seed = None, - shuffle_test = True) : - self.dirs = glob.glob (os.path.join(sys_path, set_prefix + ".*")) - self.dirs.sort() - # load atom type - self.atom_type, self.idx_map, self.idx3_map = self.load_type (sys_path) - # load atom type map - self.type_map = self.load_type_map(sys_path) - if self.type_map is not None: - assert(len(self.type_map) >= max(self.atom_type)+1) - # train dirs - self.test_dir = self.dirs[-1] - if len(self.dirs) == 1 : - self.train_dirs = self.dirs - else : - self.train_dirs = self.dirs[:-1] - # check fparam - has_fparam = [ os.path.isfile(os.path.join(ii, 'fparam.npy')) for ii in self.dirs ] - if any(has_fparam) and (not all(has_fparam)) : - raise RuntimeError("system %s: if any set has frame parameter, then all sets should have frame parameter" % sys_path) - if all(has_fparam) : - self.has_fparam = 0 - else : - self.has_fparam = -1 - # check aparam - has_aparam = [ os.path.isfile(os.path.join(ii, 'aparam.npy')) for ii in self.dirs ] - if any(has_aparam) and (not all(has_aparam)) : - raise RuntimeError("system %s: if any set has frame parameter, then all sets should have frame parameter" % sys_path) - if all(has_aparam) : - self.has_aparam = 0 - else : - self.has_aparam = -1 - # energy norm - self.eavg = self.stats_energy() - # load sets - self.set_count = 0 - self.load_batch_set (self.train_dirs[self.set_count % self.get_numb_set()]) - self.load_test_set (self.test_dir, shuffle_test) - - def check_batch_size (self, batch_size) : - for ii in self.train_dirs : - tmpe = np.load(os.path.join(ii, "coord.npy")) - if tmpe.shape[0] < batch_size : - return ii, tmpe.shape[0] - return None - - def check_test_size (self, test_size) : - tmpe = np.load(os.path.join(self.test_dir, "coord.npy")) - if tmpe.shape[0] < test_size : - return self.test_dir, tmpe.shape[0] - else : - return None - - def load_type (self, sys_path) : - atom_type = np.loadtxt (os.path.join(sys_path, "type.raw"), 
dtype=np.int32, ndmin=1) - natoms = atom_type.shape[0] - idx = np.arange (natoms) - idx_map = np.lexsort ((idx, atom_type)) - atom_type3 = np.repeat(atom_type, 3) - idx3 = np.arange (natoms * 3) - idx3_map = np.lexsort ((idx3, atom_type3)) - return atom_type, idx_map, idx3_map - - def load_type_map(self, sys_path) : - fname = os.path.join(sys_path, 'type_map.raw') - if os.path.isfile(fname) : - with open(os.path.join(sys_path, 'type_map.raw')) as fp: - return fp.read().split() - else : - return None - - def get_type_map(self) : - return self.type_map - - def get_numb_set (self) : - return len (self.train_dirs) - - def stats_energy (self) : - eners = np.array([]) - for ii in self.train_dirs: - ener_file = os.path.join(ii, "energy.npy") - if os.path.isfile(ener_file) : - ei = np.load(ener_file) - eners = np.append(eners, ei) - if eners.size == 0 : - return 0 - else : - return np.average(eners) - - def load_energy(self, - set_name, - nframes, - nvalues, - energy_file, - atom_energy_file) : - """ - return : coeff_ener, ener, coeff_atom_ener, atom_ener - """ - # load atom_energy - coeff_atom_ener, atom_ener = self.load_data(set_name, atom_energy_file, [nframes, nvalues], False) - # ignore energy_file - if coeff_atom_ener == 1: - ener = np.sum(atom_ener, axis = 1) - coeff_ener = 1 - # load energy_file - else: - coeff_ener, ener = self.load_data(set_name, energy_file, [nframes], False) - return coeff_ener, ener, coeff_atom_ener, atom_ener - - def load_data(self, set_name, data_name, shape, is_necessary = True): - path = os.path.join(set_name, data_name+".npy") - if os.path.isfile (path) : - data = np.load(path) - data = np.reshape(data, shape) - if is_necessary: - return data - return 1, data - elif is_necessary: - raise OSError("%s not found!" 
% path) - else: - data = np.zeros(shape) - return 0, data - - def load_set(self, set_name, shuffle = True): - data = {} - data["box"] = self.load_data(set_name, "box", [-1, 9]) - nframe = data["box"].shape[0] - data["coord"] = self.load_data(set_name, "coord", [nframe, -1]) - ncoord = data["coord"].shape[1] - if self.has_fparam >= 0: - data["fparam"] = self.load_data(set_name, "fparam", [nframe, -1]) - if self.has_fparam == 0 : - self.has_fparam = data["fparam"].shape[1] - else : - assert self.has_fparam == data["fparam"].shape[1] - if self.has_aparam >= 0: - data["aparam"] = self.load_data(set_name, "aparam", [nframe, -1]) - if self.has_aparam == 0 : - self.has_aparam = data["aparam"].shape[1] // (ncoord//3) - else : - assert self.has_aparam == data["aparam"].shape[1] // (ncoord//3) - data["prop_c"] = np.zeros(5) - data["prop_c"][0], data["energy"], data["prop_c"][3], data["atom_ener"] \ - = self.load_energy (set_name, nframe, ncoord // 3, "energy", "atom_ener") - data["prop_c"][1], data["force"] = self.load_data(set_name, "force", [nframe, ncoord], False) - data["prop_c"][2], data["virial"] = self.load_data(set_name, "virial", [nframe, 9], False) - data["prop_c"][4], data["atom_pref"] = self.load_data(set_name, "atom_pref", [nframe, ncoord//3], False) - data["atom_pref"] = np.repeat(data["atom_pref"], 3, axis=1) - # shuffle data - if shuffle: - idx = np.arange (nframe) - dp_random.shuffle(idx) - for ii in data: - if ii != "prop_c": - data[ii] = data[ii][idx] - data["type"] = np.tile (self.atom_type, (nframe, 1)) - # sort according to type - for ii in ["type", "atom_ener"]: - data[ii] = data[ii][:, self.idx_map] - for ii in ["coord", "force", "atom_pref"]: - data[ii] = data[ii][:, self.idx3_map] - return data - - def load_batch_set (self, - set_name) : - self.batch_set = self.load_set(set_name, True) - self.reset_iter () - - def load_test_set (self, - set_name, - shuffle_test) : - self.test_set = self.load_set(set_name, shuffle_test) - - def reset_iter (self) : - 
self.iterator = 0 - self.set_count += 1 - - def get_set(self, data, idx = None) : - new_data = {} - for ii in data: - dd = data[ii] - if ii == "prop_c": - new_data[ii] = dd.astype(np.float32) - else: - if idx is not None: - dd = dd[idx] - if ii == "type": - new_data[ii] = dd - else: - new_data[ii] = dd.astype(GLOBAL_NP_FLOAT_PRECISION) - return new_data - - def get_test (self) : - """ - returned property prefector [4] in order: - energy, force, virial, atom_ener - """ - return self.get_set(self.test_set) - - def get_batch (self, - batch_size) : - """ - returned property prefector [4] in order: - energy, force, virial, atom_ener - """ - set_size = self.batch_set["energy"].shape[0] - # assert (batch_size <= set_size), "batch size should be no more than set size" - if self.iterator + batch_size > set_size : - self.load_batch_set (self.train_dirs[self.set_count % self.get_numb_set()]) - set_size = self.batch_set["energy"].shape[0] - # print ("%d %d %d" % (self.iterator, self.iterator + batch_size, set_size)) - iterator_1 = self.iterator + batch_size - if iterator_1 >= set_size : - iterator_1 = set_size - idx = np.arange (self.iterator, iterator_1) - self.iterator += batch_size - return self.get_set(self.batch_set, idx) - - def get_natoms (self) : - sample_type = self.batch_set["type"][0] - natoms = len(sample_type) - return natoms - - def get_natoms_2 (self, ntypes) : - sample_type = self.batch_set["type"][0] - natoms = len(sample_type) - natoms_vec = np.zeros (ntypes).astype(int) - for ii in range (ntypes) : - natoms_vec[ii] = np.count_nonzero(sample_type == ii) - return natoms, natoms_vec - - def get_natoms_vec (self, ntypes) : - natoms, natoms_vec = self.get_natoms_2 (ntypes) - tmp = [natoms, natoms] - tmp = np.append (tmp, natoms_vec) - return tmp.astype(np.int32) - - def set_numb_batch (self, - batch_size) : - return self.batch_set["energy"].shape[0] // batch_size - - def get_sys_numb_batch (self, batch_size) : - return self.set_numb_batch(batch_size) * 
self.get_numb_set() - - def get_ener (self) : - return self.eavg - - def numb_fparam(self) : - return self.has_fparam - - def numb_aparam(self) : - return self.has_aparam - diff --git a/deepmd/utils/data_system.py b/deepmd/utils/data_system.py index c8ce9ec07d..a4faf4cc22 100644 --- a/deepmd/utils/data_system.py +++ b/deepmd/utils/data_system.py @@ -8,7 +8,6 @@ from typing import Tuple, List from deepmd.utils import random as dp_random -from deepmd.utils.data import DataSets from deepmd.utils.data import DeepmdData log = logging.getLogger(__name__) @@ -533,217 +532,3 @@ def _prob_sys_size_ext(self, keywords): tmp_prob = [float(i) for i in nbatch_block] / np.sum(nbatch_block) sys_probs[block_stt[ii]:block_end[ii]] = tmp_prob * block_probs[ii] return sys_probs - - - -class DataSystem (object) : - """ - Outdated class for the data systems. - - .. deprecated:: 2.0.0 - This class is not maintained any more. - """ - def __init__ (self, - systems, - set_prefix, - batch_size, - test_size, - rcut, - run_opt = None) : - self.system_dirs = systems - self.nsystems = len(self.system_dirs) - self.batch_size = batch_size - if isinstance(self.batch_size, int) : - self.batch_size = self.batch_size * np.ones(self.nsystems, dtype=int) - assert(isinstance(self.batch_size, (list,np.ndarray))) - assert(len(self.batch_size) == self.nsystems) - self.data_systems = [] - self.ntypes = [] - self.natoms = [] - self.natoms_vec = [] - self.nbatches = [] - for ii in self.system_dirs : - self.data_systems.append(DataSets(ii, set_prefix)) - sys_all_types = np.loadtxt(os.path.join(ii, "type.raw")).astype(int) - self.ntypes.append(np.max(sys_all_types) + 1) - self.sys_ntypes = max(self.ntypes) - type_map = [] - for ii in range(self.nsystems) : - self.natoms.append(self.data_systems[ii].get_natoms()) - self.natoms_vec.append(self.data_systems[ii].get_natoms_vec(self.sys_ntypes).astype(int)) - self.nbatches.append(self.data_systems[ii].get_sys_numb_batch(self.batch_size[ii])) - 
type_map.append(self.data_systems[ii].get_type_map()) - self.type_map = self.check_type_map_consistency(type_map) - - # check frame parameters - has_fparam = [ii.numb_fparam() for ii in self.data_systems] - for ii in has_fparam : - if ii != has_fparam[0] : - raise RuntimeError("if any system has frame parameter, then all systems should have the same number of frame parameter") - self.has_fparam = has_fparam[0] - - # check the size of data if they satisfy the requirement of batch and test - for ii in range(self.nsystems) : - chk_ret = self.data_systems[ii].check_batch_size(self.batch_size[ii]) - if chk_ret is not None : - raise RuntimeError ("system %s required batch size %d is larger than the size %d of the dataset %s" % \ - (self.system_dirs[ii], self.batch_size[ii], chk_ret[1], chk_ret[0])) - chk_ret = self.data_systems[ii].check_test_size(test_size) - if chk_ret is not None : - print("WARNNING: system %s required test size %d is larger than the size %d of the dataset %s" % \ - (self.system_dirs[ii], test_size, chk_ret[1], chk_ret[0])) - - if run_opt is not None: - self.print_summary(run_opt) - - self.prob_nbatches = [ float(i) for i in self.nbatches] / np.sum(self.nbatches) - - self.test_data = collections.defaultdict(list) - self.default_mesh = [] - for ii in range(self.nsystems) : - test_system_data = self.data_systems[ii].get_test () - for nn in test_system_data: - self.test_data[nn].append(test_system_data[nn]) - cell_size = np.max (rcut) - avg_box = np.average (test_system_data["box"], axis = 0) - avg_box = np.reshape (avg_box, [3,3]) - ncell = (np.linalg.norm(avg_box, axis=1)/ cell_size).astype(np.int32) - ncell[ncell < 2] = 2 - default_mesh = np.zeros (6, dtype = np.int32) - default_mesh[3:6] = ncell - self.default_mesh.append(default_mesh) - self.pick_idx = 0 - - - def check_type_map_consistency(self, type_map_list): - ret = [] - for ii in type_map_list: - if ii is not None: - min_len = min([len(ii), len(ret)]) - for idx in range(min_len) : - if ii[idx] 
!= ret[idx] : - raise RuntimeError('inconsistent type map: %s %s' % (str(ret), str(ii))) - if len(ii) > len(ret) : - ret = ii - return ret - - - def get_type_map(self): - return self.type_map - - - def format_name_length(self, name, width) : - if len(name) <= width: - return '{: >{}}'.format(name, width) - else : - name = name[-(width-3):] - name = '-- ' + name - return name - - def print_summary(self) : - tmp_msg = "" - # width 65 - sys_width = 42 - tmp_msg += "---Summary of DataSystem-----------------------------------------\n" - tmp_msg += "find %d system(s):\n" % self.nsystems - tmp_msg += "%s " % self.format_name_length('system', sys_width) - tmp_msg += "%s %s %s\n" % ('natoms', 'bch_sz', 'n_bch') - for ii in range(self.nsystems) : - tmp_msg += ("%s %6d %6d %5d\n" % - (self.format_name_length(self.system_dirs[ii], sys_width), - self.natoms[ii], - self.batch_size[ii], - self.nbatches[ii]) ) - tmp_msg += "-----------------------------------------------------------------\n" - log.info(tmp_msg) - - def compute_energy_shift(self) : - sys_ener = np.array([]) - for ss in self.data_systems : - sys_ener = np.append(sys_ener, ss.get_ener()) - sys_tynatom = np.array(self.natoms_vec, dtype = float) - sys_tynatom = np.reshape(sys_tynatom, [self.nsystems,-1]) - sys_tynatom = sys_tynatom[:,2:] - energy_shift,resd,rank,s_value \ - = np.linalg.lstsq(sys_tynatom, sys_ener, rcond = 1e-3) - return energy_shift - - def process_sys_weights(self, sys_weights) : - sys_weights = np.array(sys_weights) - type_filter = sys_weights >= 0 - assigned_sum_prob = np.sum(type_filter * sys_weights) - assert assigned_sum_prob <= 1, "the sum of assigned probability should be less than 1" - rest_sum_prob = 1. 
- assigned_sum_prob - rest_nbatch = (1 - type_filter) * self.nbatches - rest_prob = rest_sum_prob * rest_nbatch / np.sum(rest_nbatch) - ret_prob = rest_prob + type_filter * sys_weights - assert np.sum(ret_prob) == 1, "sum of probs should be 1" - return ret_prob - - def get_batch (self, - sys_idx = None, - sys_weights = None, - style = "prob_sys_size") : - if sys_idx is not None : - self.pick_idx = sys_idx - else : - if sys_weights is None : - if style == "prob_sys_size" : - prob = self.prob_nbatches - elif style == "prob_uniform" : - prob = None - else : - raise RuntimeError("unkown get_batch style") - else : - prob = self.process_sys_weights(sys_weights) - self.pick_idx = dp_random.choice(np.arange(self.nsystems), p=prob) - b_data = self.data_systems[self.pick_idx].get_batch(self.batch_size[self.pick_idx]) - b_data["natoms_vec"] = self.natoms_vec[self.pick_idx] - b_data["default_mesh"] = self.default_mesh[self.pick_idx] - return b_data - - def get_test (self, - sys_idx = None) : - if sys_idx is not None : - idx = sys_idx - else : - idx = self.pick_idx - test_system_data = {} - for nn in self.test_data: - test_system_data[nn] = self.test_data[nn][idx] - test_system_data["natoms_vec"] = self.natoms_vec[idx] - test_system_data["default_mesh"] = self.default_mesh[idx] - return test_system_data - - def get_nbatches (self) : - return self.nbatches - - def get_ntypes (self) : - return self.sys_ntypes - - def get_nsystems (self) : - return self.nsystems - - def get_sys (self, sys_idx) : - return self.data_systems[sys_idx] - - def get_batch_size(self) : - return self.batch_size - - def numb_fparam(self) : - return self.has_fparam - -def _main () : - sys = ['/home/wanghan/study/deep.md/results.01/data/mos2/only_raws/20', - '/home/wanghan/study/deep.md/results.01/data/mos2/only_raws/30', - '/home/wanghan/study/deep.md/results.01/data/mos2/only_raws/38', - '/home/wanghan/study/deep.md/results.01/data/mos2/only_raws/MoS2', - 
'/home/wanghan/study/deep.md/results.01/data/mos2/only_raws/Pt_cluster'] - set_prefix = 'set' - ds = DataSystem (sys, set_prefix, 4, 6) - r = ds.get_batch() - print(r[1][0]) - -if __name__ == '__main__': - _main() - diff --git a/deepmd/utils/learning_rate.py b/deepmd/utils/learning_rate.py index 8019e1b2bd..e51b2497bb 100644 --- a/deepmd/utils/learning_rate.py +++ b/deepmd/utils/learning_rate.py @@ -1,6 +1,5 @@ import numpy as np from deepmd.env import tf -from deepmd.common import ClassArg class LearningRateExp (object) : r""" @@ -36,12 +35,6 @@ def __init__ (self, """ Constructor """ - # args = ClassArg()\ - # .add('decay_steps', int, must = False)\ - # .add('decay_rate', float, must = False)\ - # .add('start_lr', float, must = True)\ - # .add('stop_lr', float, must = False) - # self.cd = args.parse(jdata) self.cd = {} self.cd['start_lr'] = start_lr self.cd['stop_lr'] = stop_lr diff --git a/source/op/legacy/descrpt_se_a.cc b/source/op/legacy/descrpt_se_a.cc deleted file mode 100644 index cd7abf8a76..0000000000 --- a/source/op/legacy/descrpt_se_a.cc +++ /dev/null @@ -1,350 +0,0 @@ -#include "custom_op.h" -#include "ComputeDescriptor.h" -#include "neighbor_list.h" -#include "fmt_nlist.h" -#include "env_mat.h" -#include "errors.h" - -typedef double boxtensor_t ; -typedef double compute_t; - -REGISTER_OP("DescrptSeA") - .Attr("T: {float, double}") - .Input("coord: T") //atomic coordinates - .Input("type: int32") //atomic type - .Input("natoms: int32") //local atomic number; each type atomic number; daizheyingxiangqude atomic numbers - .Input("box : T") - .Input("mesh : int32") - .Input("davg: T") //average value of data - .Input("dstd: T") //standard deviation - .Attr("rcut_a: float") //no use - .Attr("rcut_r: float") - .Attr("rcut_r_smth: float") - .Attr("sel_a: list(int)") - .Attr("sel_r: list(int)") //all zero - .Output("descrpt: T") - .Output("descrpt_deriv: T") - .Output("rij: T") - .Output("nlist: int32"); - -template -class DescrptSeAOp : public OpKernel { 
-public: - explicit DescrptSeAOp(OpKernelConstruction* context) : OpKernel(context) { - OP_REQUIRES_OK(context, context->GetAttr("rcut_a", &rcut_a)); - OP_REQUIRES_OK(context, context->GetAttr("rcut_r", &rcut_r)); - OP_REQUIRES_OK(context, context->GetAttr("rcut_r_smth", &rcut_r_smth)); - OP_REQUIRES_OK(context, context->GetAttr("sel_a", &sel_a)); - OP_REQUIRES_OK(context, context->GetAttr("sel_r", &sel_r)); - cum_sum (sec_a, sel_a); - cum_sum (sec_r, sel_r); - ndescrpt_a = sec_a.back() * 4; - ndescrpt_r = sec_r.back() * 1; - ndescrpt = ndescrpt_a + ndescrpt_r; - nnei_a = sec_a.back(); - nnei_r = sec_r.back(); - nnei = nnei_a + nnei_r; - fill_nei_a = (rcut_a < 0); - count_nei_idx_overflow = 0; - } - - void Compute(OpKernelContext* context) override { - // Grab the input tensor - int context_input_index = 0; - const Tensor& coord_tensor = context->input(context_input_index++); - const Tensor& type_tensor = context->input(context_input_index++); - const Tensor& natoms_tensor = context->input(context_input_index++); - const Tensor& box_tensor = context->input(context_input_index++); - const Tensor& mesh_tensor = context->input(context_input_index++); - const Tensor& avg_tensor = context->input(context_input_index++); - const Tensor& std_tensor = context->input(context_input_index++); - - // set size of the sample - OP_REQUIRES (context, (coord_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of coord should be 2")); - OP_REQUIRES (context, (type_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of type should be 2")); - OP_REQUIRES (context, (natoms_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of natoms should be 1")); - OP_REQUIRES (context, (box_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of box should be 2")); - OP_REQUIRES (context, (mesh_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of mesh should be 1")); - OP_REQUIRES (context, (avg_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of avg 
should be 2")); - OP_REQUIRES (context, (std_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of std should be 2")); - OP_REQUIRES (context, (fill_nei_a), errors::InvalidArgument ("Rotational free descriptor only support the case rcut_a < 0")); - OP_REQUIRES (context, (sec_r.back() == 0), errors::InvalidArgument ("Rotational free descriptor only support all-angular information: sel_r should be all zero.")); - - OP_REQUIRES (context, (natoms_tensor.shape().dim_size(0) >= 3), errors::InvalidArgument ("number of atoms should be larger than (or equal to) 3")); - auto natoms = natoms_tensor .flat(); - int nloc = natoms(0); - int nall = natoms(1); - int ntypes = natoms_tensor.shape().dim_size(0) - 2; - int nsamples = coord_tensor.shape().dim_size(0); - - // check the sizes - OP_REQUIRES (context, (nsamples == type_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nsamples == box_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (ntypes == avg_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of avg should be ntype")); - OP_REQUIRES (context, (ntypes == std_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of std should be ntype")); - - OP_REQUIRES (context, (nall * 3 == coord_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of atoms should match")); - OP_REQUIRES (context, (nall == type_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of atoms should match")); - OP_REQUIRES (context, (9 == box_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of box should be 9")); - OP_REQUIRES (context, (ndescrpt == avg_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of avg should be ndescrpt")); - OP_REQUIRES (context, (ndescrpt == std_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of std should be ndescrpt")); - - int nei_mode = 0; - if 
(mesh_tensor.shape().dim_size(0) == 16) { - // lammps neighbor list - nei_mode = 3; - } - else if (mesh_tensor.shape().dim_size(0) == 12) { - // user provided extended mesh - nei_mode = 2; - } - else if (mesh_tensor.shape().dim_size(0) == 6) { - // manual copied pbc - assert (nloc == nall); - nei_mode = 1; - } - else if (mesh_tensor.shape().dim_size(0) == 0) { - // no pbc - nei_mode = -1; - } - else { - throw deepmd::deepmd_exception("invalid mesh tensor"); - } - bool b_pbc = true; - // if region is given extended, do not use pbc - if (nei_mode >= 1 || nei_mode == -1) { - b_pbc = false; - } - bool b_norm_atom = false; - if (nei_mode == 1){ - b_norm_atom = true; - } - - // Create an output tensor - TensorShape descrpt_shape ; - descrpt_shape.AddDim (nsamples); - descrpt_shape.AddDim (nloc * ndescrpt); - TensorShape descrpt_deriv_shape ; - descrpt_deriv_shape.AddDim (nsamples); - descrpt_deriv_shape.AddDim (nloc * ndescrpt * 3); - TensorShape rij_shape ; - rij_shape.AddDim (nsamples); - rij_shape.AddDim (nloc * nnei * 3); - TensorShape nlist_shape ; - nlist_shape.AddDim (nsamples); - nlist_shape.AddDim (nloc * nnei); - - int context_output_index = 0; - Tensor* descrpt_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - descrpt_shape, - &descrpt_tensor)); - Tensor* descrpt_deriv_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - descrpt_deriv_shape, - &descrpt_deriv_tensor)); - Tensor* rij_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - rij_shape, - &rij_tensor)); - Tensor* nlist_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - nlist_shape, - &nlist_tensor)); - - auto coord = coord_tensor .matrix(); - auto type = type_tensor .matrix(); - auto box = box_tensor .matrix(); - auto mesh = mesh_tensor .flat(); - auto avg = avg_tensor .matrix(); - auto std = std_tensor .matrix(); - auto descrpt = 
descrpt_tensor ->matrix(); - auto descrpt_deriv = descrpt_deriv_tensor ->matrix(); - auto rij = rij_tensor ->matrix(); - auto nlist = nlist_tensor ->matrix(); - - // // check the types - // int max_type_v = 0; - // for (int ii = 0; ii < natoms; ++ii){ - // if (type(0, ii) > max_type_v) max_type_v = type(0, ii); - // } - // int ntypes = max_type_v + 1; - OP_REQUIRES (context, (ntypes == int(sel_a.size())), errors::InvalidArgument ("number of types should match the length of sel array")); - OP_REQUIRES (context, (ntypes == int(sel_r.size())), errors::InvalidArgument ("number of types should match the length of sel array")); - - for (int kk = 0; kk < nsamples; ++kk){ - // set region - boxtensor_t boxt [9] = {0}; - for (int dd = 0; dd < 9; ++dd) { - boxt[dd] = box(kk, dd); - } - SimulationRegion region; - region.reinitBox (boxt); - - // set & normalize coord - std::vector d_coord3 (nall*3); - for (int ii = 0; ii < nall; ++ii){ - for (int dd = 0; dd < 3; ++dd){ - d_coord3[ii*3+dd] = coord(kk, ii*3+dd); - } - if (b_norm_atom){ - compute_t inter[3]; - region.phys2Inter (inter, &d_coord3[3*ii]); - for (int dd = 0; dd < 3; ++dd){ - if (inter[dd] < 0 ) inter[dd] += 1.; - else if (inter[dd] >= 1) inter[dd] -= 1.; - } - region.inter2Phys (&d_coord3[3*ii], inter); - } - } - - // set type - std::vector d_type (nall); - for (int ii = 0; ii < nall; ++ii) d_type[ii] = type(kk, ii); - - // build nlist - std::vector > d_nlist_a; - std::vector > d_nlist_r; - std::vector nlist_map; - bool b_nlist_map = false; - if (nei_mode == 3) { - int * pilist, *pjrange, *pjlist; - memcpy (&pilist, &mesh(4), sizeof(int *)); - memcpy (&pjrange, &mesh(8), sizeof(int *)); - memcpy (&pjlist, &mesh(12), sizeof(int *)); - int inum = mesh(1); - assert (inum == nloc); - d_nlist_a.resize (inum); - d_nlist_r.resize (inum); - for (unsigned ii = 0; ii < inum; ++ii){ - d_nlist_r.reserve (pjrange[inum] / inum + 10); - } - for (unsigned ii = 0; ii < inum; ++ii){ - int i_idx = pilist[ii]; - for (unsigned jj = 
pjrange[ii]; jj < pjrange[ii+1]; ++jj){ - int j_idx = pjlist[jj]; - d_nlist_r[i_idx].push_back (j_idx); - } - } - } - else if (nei_mode == 2) { - std::vector nat_stt = {mesh(1-1), mesh(2-1), mesh(3-1)}; - std::vector nat_end = {mesh(4-1), mesh(5-1), mesh(6-1)}; - std::vector ext_stt = {mesh(7-1), mesh(8-1), mesh(9-1)}; - std::vector ext_end = {mesh(10-1), mesh(11-1), mesh(12-1)}; - std::vector global_grid (3); - for (int dd = 0; dd < 3; ++dd) global_grid[dd] = nat_end[dd] - nat_stt[dd]; - ::build_nlist (d_nlist_a, d_nlist_r, d_coord3, nloc, rcut_a, rcut_r, nat_stt, nat_end, ext_stt, ext_end, region, global_grid); - } - else if (nei_mode == 1) { - std::vector bk_d_coord3 = d_coord3; - std::vector bk_d_type = d_type; - std::vector ncell, ngcell; - copy_coord(d_coord3, d_type, nlist_map, ncell, ngcell, bk_d_coord3, bk_d_type, rcut_r, region); - b_nlist_map = true; - std::vector nat_stt(3, 0); - std::vector ext_stt(3), ext_end(3); - for (int dd = 0; dd < 3; ++dd){ - ext_stt[dd] = -ngcell[dd]; - ext_end[dd] = ncell[dd] + ngcell[dd]; - } - ::build_nlist (d_nlist_a, d_nlist_r, d_coord3, nloc, rcut_a, rcut_r, nat_stt, ncell, ext_stt, ext_end, region, ncell); - } - else if (nei_mode == -1){ - ::build_nlist (d_nlist_a, d_nlist_r, d_coord3, rcut_a, rcut_r, NULL); - } - else { - throw deepmd::deepmd_exception("unknow neighbor mode"); - } - - // loop over atoms, compute descriptors for each atom -#pragma omp parallel for - for (int ii = 0; ii < nloc; ++ii){ - std::vector fmt_nlist_a; - std::vector fmt_nlist_r; - int ret = -1; - if (fill_nei_a){ - if ((ret = format_nlist_i_fill_a (fmt_nlist_a, fmt_nlist_r, d_coord3, ntypes, d_type, region, b_pbc, ii, d_nlist_a[ii], d_nlist_r[ii], rcut_r, sec_a, sec_r)) != -1){ - if (count_nei_idx_overflow == 0) { - std::cout << "WARNING: Radial neighbor list length of type " << ret << " is not enough" << std::endl; - flush(std::cout); - count_nei_idx_overflow ++; - } - } - } - - std::vector d_descrpt_a; - std::vector d_descrpt_a_deriv; - 
std::vector d_descrpt_r; - std::vector d_descrpt_r_deriv; - std::vector d_rij_a; - std::vector d_rij_r; - env_mat_a (d_descrpt_a, - d_descrpt_a_deriv, - d_rij_a, - d_coord3, - ntypes, - d_type, - region, - b_pbc, - ii, - fmt_nlist_a, - sec_a, - rcut_r_smth, - rcut_r); - - // check sizes - assert (d_descrpt_a.size() == ndescrpt_a); - assert (d_descrpt_a_deriv.size() == ndescrpt_a * 3); - assert (d_rij_a.size() == nnei_a * 3); - assert (int(fmt_nlist_a.size()) == nnei_a); - // record outputs - for (int jj = 0; jj < ndescrpt_a; ++jj) { - descrpt(kk, ii * ndescrpt + jj) = (d_descrpt_a[jj] - avg(d_type[ii], jj)) / std(d_type[ii], jj); - } - for (int jj = 0; jj < ndescrpt_a * 3; ++jj) { - descrpt_deriv(kk, ii * ndescrpt * 3 + jj) = d_descrpt_a_deriv[jj] / std(d_type[ii], jj/3); - } - for (int jj = 0; jj < nnei_a * 3; ++jj){ - rij (kk, ii * nnei * 3 + jj) = d_rij_a[jj]; - } - for (int jj = 0; jj < nnei_a; ++jj){ - int record = fmt_nlist_a[jj]; - if (b_nlist_map && record >= 0) { - record = nlist_map[record]; - } - nlist (kk, ii * nnei + jj) = record; - } - } - } - } -private: - float rcut_a; - float rcut_r; - float rcut_r_smth; - std::vector sel_r; - std::vector sel_a; - std::vector sec_a; - std::vector sec_r; - int ndescrpt, ndescrpt_a, ndescrpt_r; - int nnei, nnei_a, nnei_r; - bool fill_nei_a; - int count_nei_idx_overflow; - void - cum_sum (std::vector & sec, - const std::vector & n_sel) const { - sec.resize (n_sel.size() + 1); - sec[0] = 0; - for (int ii = 1; ii < sec.size(); ++ii){ - sec[ii] = sec[ii-1] + n_sel[ii-1]; - } - } -}; - -#define REGISTER_CPU(T) \ -REGISTER_KERNEL_BUILDER( \ - Name("DescrptSeA").Device(DEVICE_CPU).TypeConstraint("T"), \ - DescrptSeAOp); -REGISTER_CPU(float); -REGISTER_CPU(double); - diff --git a/source/op/legacy/descrpt_se_r.cc b/source/op/legacy/descrpt_se_r.cc deleted file mode 100644 index 408818fbee..0000000000 --- a/source/op/legacy/descrpt_se_r.cc +++ /dev/null @@ -1,335 +0,0 @@ -#include "custom_op.h" -#include "ComputeDescriptor.h" 
-#include "neighbor_list.h" -#include "fmt_nlist.h" -#include "env_mat.h" -#include "errors.h" - -typedef double boxtensor_t ; -typedef double compute_t; - -REGISTER_OP("DescrptSeR") -.Attr("T: {float, double}") -.Input("coord: T") -.Input("type: int32") -.Input("natoms: int32") -.Input("box: T") -.Input("mesh: int32") -.Input("davg: T") -.Input("dstd: T") -.Attr("rcut: float") -.Attr("rcut_smth: float") -.Attr("sel: list(int)") -.Output("descrpt: T") -.Output("descrpt_deriv: T") -.Output("rij: T") -.Output("nlist: int32"); - -template -class DescrptSeROp : public OpKernel { -public: - explicit DescrptSeROp(OpKernelConstruction* context) : OpKernel(context) { - OP_REQUIRES_OK(context, context->GetAttr("rcut", &rcut)); - OP_REQUIRES_OK(context, context->GetAttr("rcut_smth", &rcut_smth)); - OP_REQUIRES_OK(context, context->GetAttr("sel", &sel)); - cum_sum (sec, sel); - sel_null.resize(3, 0); - cum_sum (sec_null, sel_null); - ndescrpt = sec.back() * 1; - nnei = sec.back(); - fill_nei_a = true; - count_nei_idx_overflow = 0; - } - - void Compute(OpKernelContext* context) override { - // Grab the input tensor - int context_input_index = 0; - const Tensor& coord_tensor = context->input(context_input_index++); - const Tensor& type_tensor = context->input(context_input_index++); - const Tensor& natoms_tensor = context->input(context_input_index++); - const Tensor& box_tensor = context->input(context_input_index++); - const Tensor& mesh_tensor = context->input(context_input_index++); - const Tensor& avg_tensor = context->input(context_input_index++); - const Tensor& std_tensor = context->input(context_input_index++); - - // set size of the sample - OP_REQUIRES (context, (coord_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of coord should be 2")); - OP_REQUIRES (context, (type_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of type should be 2")); - OP_REQUIRES (context, (natoms_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of natoms 
should be 1")); - OP_REQUIRES (context, (box_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of box should be 2")); - OP_REQUIRES (context, (mesh_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of mesh should be 1")); - OP_REQUIRES (context, (avg_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of avg should be 2")); - OP_REQUIRES (context, (std_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of std should be 2")); - OP_REQUIRES (context, (fill_nei_a), errors::InvalidArgument ("Rotational free descriptor only support the case rcut_a < 0")); - - OP_REQUIRES (context, (natoms_tensor.shape().dim_size(0) >= 3), errors::InvalidArgument ("number of atoms should be larger than (or equal to) 3")); - auto natoms = natoms_tensor .flat(); - int nloc = natoms(0); - int nall = natoms(1); - int ntypes = natoms_tensor.shape().dim_size(0) - 2; - int nsamples = coord_tensor.shape().dim_size(0); - - // check the sizes - OP_REQUIRES (context, (nsamples == type_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nsamples == box_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (ntypes == avg_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of avg should be ntype")); - OP_REQUIRES (context, (ntypes == std_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of std should be ntype")); - - OP_REQUIRES (context, (nall * 3 == coord_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of atoms should match")); - OP_REQUIRES (context, (nall == type_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of atoms should match")); - OP_REQUIRES (context, (9 == box_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of box should be 9")); - OP_REQUIRES (context, (ndescrpt == avg_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of avg should be ndescrpt")); - 
OP_REQUIRES (context, (ndescrpt == std_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of std should be ndescrpt")); - - int nei_mode = 0; - if (mesh_tensor.shape().dim_size(0) == 16) { - // lammps neighbor list - nei_mode = 3; - } - else if (mesh_tensor.shape().dim_size(0) == 12) { - // user provided extended mesh - nei_mode = 2; - } - else if (mesh_tensor.shape().dim_size(0) == 6) { - // manual copied pbc - assert (nloc == nall); - nei_mode = 1; - } - else if (mesh_tensor.shape().dim_size(0) == 0) { - // no pbc - nei_mode = -1; - } - else { - throw deepmd::deepmd_exception("invalid mesh tensor"); - } - bool b_pbc = true; - // if region is given extended, do not use pbc - if (nei_mode >= 1 || nei_mode == -1) { - b_pbc = false; - } - bool b_norm_atom = false; - if (nei_mode == 1){ - b_norm_atom = true; - } - - // Create an output tensor - TensorShape descrpt_shape ; - descrpt_shape.AddDim (nsamples); - descrpt_shape.AddDim (nloc * ndescrpt); - TensorShape descrpt_deriv_shape ; - descrpt_deriv_shape.AddDim (nsamples); - descrpt_deriv_shape.AddDim (nloc * ndescrpt * 3); - TensorShape rij_shape ; - rij_shape.AddDim (nsamples); - rij_shape.AddDim (nloc * nnei * 3); - TensorShape nlist_shape ; - nlist_shape.AddDim (nsamples); - nlist_shape.AddDim (nloc * nnei); - - int context_output_index = 0; - Tensor* descrpt_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - descrpt_shape, - &descrpt_tensor)); - Tensor* descrpt_deriv_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - descrpt_deriv_shape, - &descrpt_deriv_tensor)); - Tensor* rij_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - rij_shape, - &rij_tensor)); - Tensor* nlist_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - nlist_shape, - &nlist_tensor)); - - auto coord = coord_tensor .matrix(); - auto type = type_tensor .matrix(); - auto box = 
box_tensor .matrix(); - auto mesh = mesh_tensor .flat(); - auto avg = avg_tensor .matrix(); - auto std = std_tensor .matrix(); - auto descrpt = descrpt_tensor ->matrix(); - auto descrpt_deriv = descrpt_deriv_tensor ->matrix(); - auto rij = rij_tensor ->matrix(); - auto nlist = nlist_tensor ->matrix(); - - OP_REQUIRES (context, (ntypes == int(sel.size())), errors::InvalidArgument ("number of types should match the length of sel array")); - - for (int kk = 0; kk < nsamples; ++kk){ - // set region - boxtensor_t boxt [9] = {0}; - for (int dd = 0; dd < 9; ++dd) { - boxt[dd] = box(kk, dd); - } - SimulationRegion region; - region.reinitBox (boxt); - - // set & normalize coord - std::vector d_coord3 (nall*3); - for (int ii = 0; ii < nall; ++ii){ - for (int dd = 0; dd < 3; ++dd){ - d_coord3[ii*3+dd] = coord(kk, ii*3+dd); - } - if (b_norm_atom){ - compute_t inter[3]; - region.phys2Inter (inter, &d_coord3[3*ii]); - for (int dd = 0; dd < 3; ++dd){ - if (inter[dd] < 0 ) inter[dd] += 1.; - else if (inter[dd] >= 1) inter[dd] -= 1.; - } - region.inter2Phys (&d_coord3[3*ii], inter); - } - } - - // set type - std::vector d_type (nall); - for (int ii = 0; ii < nall; ++ii) d_type[ii] = type(kk, ii); - - // build nlist - std::vector > d_nlist; - std::vector > d_nlist_null; - std::vector nlist_map; - bool b_nlist_map = false; - if (nei_mode == 3) { - int * pilist, *pjrange, *pjlist; - memcpy (&pilist, &mesh(4), sizeof(int *)); - memcpy (&pjrange, &mesh(8), sizeof(int *)); - memcpy (&pjlist, &mesh(12), sizeof(int *)); - int inum = mesh(1); - assert (inum == nloc); - d_nlist_null.resize (inum); - d_nlist.resize (inum); - for (unsigned ii = 0; ii < inum; ++ii){ - d_nlist.reserve (pjrange[inum] / inum + 10); - } - for (unsigned ii = 0; ii < inum; ++ii){ - int i_idx = pilist[ii]; - for (unsigned jj = pjrange[ii]; jj < pjrange[ii+1]; ++jj){ - int j_idx = pjlist[jj]; - d_nlist[i_idx].push_back (j_idx); - } - } - } - else if (nei_mode == 2) { - std::vector nat_stt = {mesh(1-1), mesh(2-1), 
mesh(3-1)}; - std::vector nat_end = {mesh(4-1), mesh(5-1), mesh(6-1)}; - std::vector ext_stt = {mesh(7-1), mesh(8-1), mesh(9-1)}; - std::vector ext_end = {mesh(10-1), mesh(11-1), mesh(12-1)}; - std::vector global_grid (3); - for (int dd = 0; dd < 3; ++dd) global_grid[dd] = nat_end[dd] - nat_stt[dd]; - ::build_nlist (d_nlist_null, d_nlist, d_coord3, nloc, -1, rcut, nat_stt, nat_end, ext_stt, ext_end, region, global_grid); - } - else if (nei_mode == 1) { - std::vector bk_d_coord3 = d_coord3; - std::vector bk_d_type = d_type; - std::vector ncell, ngcell; - copy_coord(d_coord3, d_type, nlist_map, ncell, ngcell, bk_d_coord3, bk_d_type, rcut, region); - b_nlist_map = true; - std::vector nat_stt(3, 0); - std::vector ext_stt(3), ext_end(3); - for (int dd = 0; dd < 3; ++dd){ - ext_stt[dd] = -ngcell[dd]; - ext_end[dd] = ncell[dd] + ngcell[dd]; - } - ::build_nlist (d_nlist_null, d_nlist, d_coord3, nloc, -1, rcut, nat_stt, ncell, ext_stt, ext_end, region, ncell); - } - else if (nei_mode == -1){ - ::build_nlist (d_nlist_null, d_nlist, d_coord3, -1, rcut, NULL); - } - else { - throw deepmd::deepmd_exception("unknow neighbor mode"); - } - - // loop over atoms, compute descriptors for each atom -#pragma omp parallel for - for (int ii = 0; ii < nloc; ++ii){ - std::vector fmt_nlist_null; - std::vector fmt_nlist; - int ret = -1; - if (fill_nei_a){ - if ((ret = format_nlist_i_fill_a (fmt_nlist, fmt_nlist_null, d_coord3, ntypes, d_type, region, b_pbc, ii, d_nlist_null[ii], d_nlist[ii], rcut, sec, sec_null)) != -1){ - if (count_nei_idx_overflow == 0) { - std::cout << "WARNING: Radial neighbor list length of type " << ret << " is not enough" << std::endl; - flush(std::cout); - count_nei_idx_overflow ++; - } - } - } - // std::cout << ii << " " ; - // for (int jj = 0 ; jj < fmt_nlist.size(); ++jj){ - // std::cout << fmt_nlist[jj] << " " ; - // } - // std::cout << std::endl; - - std::vector d_descrpt; - std::vector d_descrpt_deriv; - std::vector d_rij; - env_mat_r (d_descrpt, - 
d_descrpt_deriv, - d_rij, - d_coord3, - ntypes, - d_type, - region, - b_pbc, - ii, - fmt_nlist, - sec, - rcut_smth, - rcut); - - // check sizes - assert (d_descrpt_deriv.size() == ndescrpt * 3); - assert (d_rij.size() == nnei * 3); - assert (int(fmt_nlist.size()) == nnei); - // record outputs - for (int jj = 0; jj < ndescrpt; ++jj) { - descrpt(kk, ii * ndescrpt + jj) = (d_descrpt[jj] - avg(d_type[ii], jj)) / std(d_type[ii], jj); - } - for (int jj = 0; jj < ndescrpt * 3; ++jj) { - descrpt_deriv(kk, ii * ndescrpt * 3 + jj) = d_descrpt_deriv[jj] / std(d_type[ii], jj/3); - } - for (int jj = 0; jj < nnei * 3; ++jj){ - rij (kk, ii * nnei * 3 + jj) = d_rij[jj]; - } - for (int jj = 0; jj < nnei; ++jj){ - int record = fmt_nlist[jj]; - if (b_nlist_map && record >= 0) { - record = nlist_map[record]; - } - nlist (kk, ii * nnei + jj) = record; - } - } - } - } -private: - float rcut; - float rcut_smth; - std::vector sel; - std::vector sel_null; - std::vector sec; - std::vector sec_null; - int ndescrpt; - int nnei; - bool fill_nei_a; - int count_nei_idx_overflow; - void - cum_sum (std::vector & sec, - const std::vector & n_sel) const { - sec.resize (n_sel.size() + 1); - sec[0] = 0; - for (int ii = 1; ii < sec.size(); ++ii){ - sec[ii] = sec[ii-1] + n_sel[ii-1]; - } - } -}; - -#define REGISTER_CPU(T) \ -REGISTER_KERNEL_BUILDER( \ - Name("DescrptSeR").Device(DEVICE_CPU).TypeConstraint("T"), \ - DescrptSeROp); -REGISTER_CPU(float); -REGISTER_CPU(double); - diff --git a/source/op/legacy/prod_force_se_a.cc b/source/op/legacy/prod_force_se_a.cc deleted file mode 100644 index a07cd1d02d..0000000000 --- a/source/op/legacy/prod_force_se_a.cc +++ /dev/null @@ -1,114 +0,0 @@ -#include "custom_op.h" -#include "prod_force.h" - -REGISTER_OP("ProdForceSeA") -.Attr("T: {float, double}") -.Input("net_deriv: T") -.Input("in_deriv: T") -.Input("nlist: int32") -.Input("natoms: int32") -.Attr("n_a_sel: int") -.Attr("n_r_sel: int") -.Output("force: T"); - - -using namespace tensorflow; - -using 
CPUDevice = Eigen::ThreadPoolDevice; -using GPUDevice = Eigen::GpuDevice; - -template -class ProdForceSeAOp : public OpKernel { - public: - explicit ProdForceSeAOp(OpKernelConstruction* context) : OpKernel(context) { - OP_REQUIRES_OK(context, context->GetAttr("n_a_sel", &n_a_sel)); - OP_REQUIRES_OK(context, context->GetAttr("n_r_sel", &n_r_sel)); - // n_a_shift = n_a_sel * 4; - } - - void Compute(OpKernelContext* context) override { - // Grab the input tensor - int context_input_index = 0; - const Tensor& net_deriv_tensor = context->input(context_input_index++); - const Tensor& in_deriv_tensor = context->input(context_input_index++); - const Tensor& nlist_tensor = context->input(context_input_index++); - const Tensor& natoms_tensor = context->input(context_input_index++); - - // set size of the sample - OP_REQUIRES (context, (net_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of net deriv should be 2")); - OP_REQUIRES (context, (in_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of input deriv should be 2")); - OP_REQUIRES (context, (nlist_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of nlist should be 2")); - OP_REQUIRES (context, (natoms_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of natoms should be 1")); - - OP_REQUIRES (context, (natoms_tensor.shape().dim_size(0) >= 3), errors::InvalidArgument ("number of atoms should be larger than (or equal to) 3")); - auto natoms = natoms_tensor .flat(); - - int nframes = net_deriv_tensor.shape().dim_size(0); - int nloc = natoms(0); - int nall = natoms(1); - int ndescrpt = net_deriv_tensor.shape().dim_size(1) / nloc; - int nnei = nlist_tensor.shape().dim_size(1) / nloc; - - // check the sizes - OP_REQUIRES (context, (nframes == in_deriv_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nframes == nlist_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - - 
OP_REQUIRES (context, (nloc * ndescrpt * 3 == in_deriv_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of descriptors should match")); - OP_REQUIRES (context, (nnei == n_a_sel + n_r_sel), errors::InvalidArgument ("number of neighbors should match")); - OP_REQUIRES (context, (0 == n_r_sel), errors::InvalidArgument ("Rotational free only support all-angular information")); - - // Create an output tensor - TensorShape force_shape ; - force_shape.AddDim (nframes); - force_shape.AddDim (3 * nall); - Tensor* force_tensor = NULL; - int context_output_index = 0; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - force_shape, &force_tensor)); - - // flat the tensors - auto net_deriv = net_deriv_tensor.flat(); - auto in_deriv = in_deriv_tensor.flat(); - auto nlist = nlist_tensor.flat(); - auto force = force_tensor->flat(); - - assert (nframes == force_shape.dim_size(0)); - assert (nframes == net_deriv_tensor.shape().dim_size(0)); - assert (nframes == in_deriv_tensor.shape().dim_size(0)); - assert (nframes == nlist_tensor.shape().dim_size(0)); - assert (nall * 3 == force_shape.dim_size(1)); - assert (nloc * ndescrpt == net_deriv_tensor.shape().dim_size(1)); - assert (nloc * ndescrpt * 3 == in_deriv_tensor.shape().dim_size(1)); - assert (nloc * nnei == nlist_tensor.shape().dim_size(1)); - assert (nnei * 4 == ndescrpt); - - // loop over samples -#pragma omp parallel for - for (int kk = 0; kk < nframes; ++kk){ - int force_iter = kk * nall * 3; - int net_iter = kk * nloc * ndescrpt; - int in_iter = kk * nloc * ndescrpt * 3; - int nlist_iter = kk * nloc * nnei; - - deepmd::prod_force_a_cpu( - &force(force_iter), - &net_deriv(net_iter), - &in_deriv(in_iter), - &nlist(nlist_iter), - nloc, - nall, - nnei); - } - } -private: - int n_r_sel, n_a_sel; -}; - -// Register the CPU kernels. 
-#define REGISTER_CPU(T) \ -REGISTER_KERNEL_BUILDER( \ - Name("ProdForceSeA").Device(DEVICE_CPU).TypeConstraint("T"), \ - ProdForceSeAOp); -REGISTER_CPU(float); -REGISTER_CPU(double); - diff --git a/source/op/legacy/prod_force_se_r.cc b/source/op/legacy/prod_force_se_r.cc deleted file mode 100644 index 1aa6d76760..0000000000 --- a/source/op/legacy/prod_force_se_r.cc +++ /dev/null @@ -1,104 +0,0 @@ -#include "custom_op.h" -#include "prod_force.h" - -REGISTER_OP("ProdForceSeR") -.Attr("T: {float, double}") -.Input("net_deriv: T") -.Input("in_deriv: T") -.Input("nlist: int32") -.Input("natoms: int32") -.Output("force: T"); - -using namespace tensorflow; - -using CPUDevice = Eigen::ThreadPoolDevice; - -template -class ProdForceSeROp : public OpKernel { - public: - explicit ProdForceSeROp(OpKernelConstruction* context) : OpKernel(context) { - } - - void Compute(OpKernelContext* context) override { - // Grab the input tensor - int context_input_index = 0; - const Tensor& net_deriv_tensor = context->input(context_input_index++); - const Tensor& in_deriv_tensor = context->input(context_input_index++); - const Tensor& nlist_tensor = context->input(context_input_index++); - const Tensor& natoms_tensor = context->input(context_input_index++); - - // set size of the sample - OP_REQUIRES (context, (net_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of net deriv should be 2")); - OP_REQUIRES (context, (in_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of input deriv should be 2")); - OP_REQUIRES (context, (nlist_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of nlist should be 2")); - OP_REQUIRES (context, (natoms_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of natoms should be 1")); - - OP_REQUIRES (context, (natoms_tensor.shape().dim_size(0) >= 3), errors::InvalidArgument ("number of atoms should be larger than (or equal to) 3")); - auto natoms = natoms_tensor .flat(); - - int nframes = 
net_deriv_tensor.shape().dim_size(0); - int nloc = natoms(0); - int nall = natoms(1); - int ndescrpt = net_deriv_tensor.shape().dim_size(1) / nloc; - int nnei = nlist_tensor.shape().dim_size(1) / nloc; - - // check the sizes - OP_REQUIRES (context, (nframes == in_deriv_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nframes == nlist_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - - OP_REQUIRES (context, (nloc * ndescrpt * 3 == in_deriv_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of descriptors should match")); - - // Create an output tensor - TensorShape force_shape ; - force_shape.AddDim (nframes); - force_shape.AddDim (3 * nall); - Tensor* force_tensor = NULL; - int context_output_index = 0; - OP_REQUIRES_OK(context, context->allocate_output(context_output_index++, - force_shape, &force_tensor)); - - // flat the tensors - auto net_deriv = net_deriv_tensor.flat(); - auto in_deriv = in_deriv_tensor.flat(); - auto nlist = nlist_tensor.flat(); - auto force = force_tensor->flat(); - - assert (nframes == force_shape.dim_size(0)); - assert (nframes == net_deriv_tensor.shape().dim_size(0)); - assert (nframes == in_deriv_tensor.shape().dim_size(0)); - assert (nframes == nlist_tensor.shape().dim_size(0)); - assert (nall * 3 == force_shape.dim_size(1)); - assert (nloc * ndescrpt == net_deriv_tensor.shape().dim_size(1)); - assert (nloc * ndescrpt * 3 == in_deriv_tensor.shape().dim_size(1)); - assert (nloc * nnei == nlist_tensor.shape().dim_size(1)); - assert (nnei * 1 == ndescrpt); - - // loop over samples -#pragma omp parallel for - for (int kk = 0; kk < nframes; ++kk){ - int force_iter = kk * nall * 3; - int net_iter = kk * nloc * ndescrpt; - int in_iter = kk * nloc * ndescrpt * 3; - int nlist_iter = kk * nloc * nnei; - - deepmd::prod_force_r_cpu( - &force(force_iter), - &net_deriv(net_iter), - &in_deriv(in_iter), - &nlist(nlist_iter), - nloc, - 
nall, - nnei); - } - } -}; - -// Register the CPU kernels. -#define REGISTER_CPU(T) \ -REGISTER_KERNEL_BUILDER( \ - Name("ProdForceSeR").Device(DEVICE_CPU).TypeConstraint("T"), \ - ProdForceSeROp); -REGISTER_CPU(float); -REGISTER_CPU(double); - - diff --git a/source/op/legacy/prod_virial_se_a.cc b/source/op/legacy/prod_virial_se_a.cc deleted file mode 100644 index 80223f5e67..0000000000 --- a/source/op/legacy/prod_virial_se_a.cc +++ /dev/null @@ -1,119 +0,0 @@ -#include "custom_op.h" -#include "prod_virial.h" - -REGISTER_OP("ProdVirialSeA") -.Attr("T: {float, double}") -.Input("net_deriv: T") -.Input("in_deriv: T") -.Input("rij: T") -.Input("nlist: int32") -.Input("natoms: int32") -.Attr("n_a_sel: int") -.Attr("n_r_sel: int") -.Output("virial: T") -.Output("atom_virial: T"); - -using namespace tensorflow; - -using CPUDevice = Eigen::ThreadPoolDevice; -using GPUDevice = Eigen::GpuDevice; - -template -class ProdVirialSeAOp : public OpKernel { - public: - explicit ProdVirialSeAOp(OpKernelConstruction* context) : OpKernel(context) { - OP_REQUIRES_OK(context, context->GetAttr("n_a_sel", &n_a_sel)); - OP_REQUIRES_OK(context, context->GetAttr("n_r_sel", &n_r_sel)); - // n_a_shift = n_a_sel * 4; - } - - void Compute(OpKernelContext* context) override { - // Grab the input tensor - int context_input_index = 0; - const Tensor& net_deriv_tensor = context->input(context_input_index++); - const Tensor& in_deriv_tensor = context->input(context_input_index++); - const Tensor& rij_tensor = context->input(context_input_index++); - const Tensor& nlist_tensor = context->input(context_input_index++); - const Tensor& natoms_tensor = context->input(context_input_index++); - - // set size of the sample - OP_REQUIRES (context, (net_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of net deriv should be 2")); - OP_REQUIRES (context, (in_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of input deriv should be 2")); - OP_REQUIRES (context, 
(rij_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of rij should be 2")); - OP_REQUIRES (context, (nlist_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of nlist should be 2")); - OP_REQUIRES (context, (natoms_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of natoms should be 1")); - - OP_REQUIRES (context, (natoms_tensor.shape().dim_size(0) >= 3), errors::InvalidArgument ("number of atoms should be larger than (or equal to) 3")); - auto natoms = natoms_tensor .flat(); - - int nframes = net_deriv_tensor.shape().dim_size(0); - int nloc = natoms(0); - int nall = natoms(1); - int ndescrpt = net_deriv_tensor.shape().dim_size(1) / nloc; - int nnei = nlist_tensor.shape().dim_size(1) / nloc; - - // check the sizes - OP_REQUIRES (context, (nframes == in_deriv_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nframes == rij_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nframes == nlist_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - - OP_REQUIRES (context, (nloc * ndescrpt * 3 == in_deriv_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of descriptors should match")); - OP_REQUIRES (context, (nloc * nnei * 3 == rij_tensor.shape().dim_size(1)), errors::InvalidArgument ("dim of rij should be nnei * 3")); - OP_REQUIRES (context, (nnei == n_a_sel + n_r_sel), errors::InvalidArgument ("number of neighbors should match")); - - // Create an output tensor - TensorShape virial_shape ; - virial_shape.AddDim (nframes); - virial_shape.AddDim (9); - Tensor* virial_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(0, virial_shape, &virial_tensor)); - TensorShape atom_virial_shape ; - atom_virial_shape.AddDim (nframes); - atom_virial_shape.AddDim (9 * nall); - Tensor* atom_virial_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(1, 
atom_virial_shape, &atom_virial_tensor)); - - // flat the tensors - auto net_deriv = net_deriv_tensor.flat(); - auto in_deriv = in_deriv_tensor.flat(); - auto rij = rij_tensor.flat(); - auto nlist = nlist_tensor.flat(); - auto virial = virial_tensor->flat(); - auto atom_virial = atom_virial_tensor->flat(); - - // loop over samples -#pragma omp parallel for - for (int kk = 0; kk < nframes; ++kk){ - int net_iter = kk * nloc * ndescrpt; - int in_iter = kk * nloc * ndescrpt * 3; - int rij_iter = kk * nloc * nnei * 3; - int nlist_iter = kk * nloc * nnei; - int virial_iter = kk * 9; - int atom_virial_iter = kk * nall * 9; - - deepmd::prod_virial_a_cpu( - &virial(virial_iter), - &atom_virial(atom_virial_iter), - &net_deriv(net_iter), - &in_deriv(in_iter), - &rij(rij_iter), - &nlist(nlist_iter), - nloc, - nall, - nnei); - } - } -private: - int n_r_sel, n_a_sel; -}; - -// Register the CPU kernels. -#define REGISTER_CPU(T) \ -REGISTER_KERNEL_BUILDER( \ - Name("ProdVirialSeA").Device(DEVICE_CPU).TypeConstraint("T"), \ - ProdVirialSeAOp); -REGISTER_CPU(float); -REGISTER_CPU(double); - - - diff --git a/source/op/legacy/prod_virial_se_r.cc b/source/op/legacy/prod_virial_se_r.cc deleted file mode 100644 index d063de03a3..0000000000 --- a/source/op/legacy/prod_virial_se_r.cc +++ /dev/null @@ -1,109 +0,0 @@ -#include "custom_op.h" -#include "prod_virial.h" - -REGISTER_OP("ProdVirialSeR") -.Attr("T: {float, double}") -.Input("net_deriv: T") -.Input("in_deriv: T") -.Input("rij: T") -.Input("nlist: int32") -.Input("natoms: int32") -.Output("virial: T") -.Output("atom_virial: T"); - -using namespace tensorflow; - -using CPUDevice = Eigen::ThreadPoolDevice; - -template -class ProdVirialSeROp : public OpKernel { - public: - explicit ProdVirialSeROp(OpKernelConstruction* context) : OpKernel(context) { - } - - void Compute(OpKernelContext* context) override { - // Grab the input tensor - int context_input_index = 0; - const Tensor& net_deriv_tensor = context->input(context_input_index++); 
- const Tensor& in_deriv_tensor = context->input(context_input_index++); - const Tensor& rij_tensor = context->input(context_input_index++); - const Tensor& nlist_tensor = context->input(context_input_index++); - const Tensor& natoms_tensor = context->input(context_input_index++); - - // set size of the sample - OP_REQUIRES (context, (net_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of net deriv should be 2")); - OP_REQUIRES (context, (in_deriv_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of input deriv should be 2")); - OP_REQUIRES (context, (rij_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of rij should be 2")); - OP_REQUIRES (context, (nlist_tensor.shape().dims() == 2), errors::InvalidArgument ("Dim of nlist should be 2")); - OP_REQUIRES (context, (natoms_tensor.shape().dims() == 1), errors::InvalidArgument ("Dim of natoms should be 1")); - - OP_REQUIRES (context, (natoms_tensor.shape().dim_size(0) >= 3), errors::InvalidArgument ("number of atoms should be larger than (or equal to) 3")); - auto natoms = natoms_tensor .flat(); - - int nframes = net_deriv_tensor.shape().dim_size(0); - int nloc = natoms(0); - int nall = natoms(1); - int ndescrpt = net_deriv_tensor.shape().dim_size(1) / nloc; - int nnei = nlist_tensor.shape().dim_size(1) / nloc; - - // check the sizes - OP_REQUIRES (context, (nframes == in_deriv_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nframes == rij_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - OP_REQUIRES (context, (nframes == nlist_tensor.shape().dim_size(0)), errors::InvalidArgument ("number of samples should match")); - - OP_REQUIRES (context, (nloc * ndescrpt * 3 == in_deriv_tensor.shape().dim_size(1)), errors::InvalidArgument ("number of descriptors should match")); - OP_REQUIRES (context, (nloc * nnei * 3 == rij_tensor.shape().dim_size(1)), errors::InvalidArgument ("dim of rij 
should be nnei * 3")); - - // Create an output tensor - TensorShape virial_shape ; - virial_shape.AddDim (nframes); - virial_shape.AddDim (9); - Tensor* virial_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(0, virial_shape, &virial_tensor)); - TensorShape atom_virial_shape ; - atom_virial_shape.AddDim (nframes); - atom_virial_shape.AddDim (9 * nall); - Tensor* atom_virial_tensor = NULL; - OP_REQUIRES_OK(context, context->allocate_output(1, atom_virial_shape, &atom_virial_tensor)); - - // flat the tensors - auto net_deriv = net_deriv_tensor.flat(); - auto in_deriv = in_deriv_tensor.flat(); - auto rij = rij_tensor.flat(); - auto nlist = nlist_tensor.flat(); - auto virial = virial_tensor->flat(); - auto atom_virial = atom_virial_tensor->flat(); - - // loop over samples -#pragma omp parallel for - for (int kk = 0; kk < nframes; ++kk){ - int net_iter = kk * nloc * ndescrpt; - int in_iter = kk * nloc * ndescrpt * 3; - int rij_iter = kk * nloc * nnei * 3; - int nlist_iter = kk * nloc * nnei; - int virial_iter = kk * 9; - int atom_virial_iter = kk * nall * 9; - - deepmd::prod_virial_r_cpu( - &virial(virial_iter), - &atom_virial(atom_virial_iter), - &net_deriv(net_iter), - &in_deriv(in_iter), - &rij(rij_iter), - &nlist(nlist_iter), - nloc, - nall, - nnei); - } - } -}; - -#define REGISTER_CPU(T) \ -REGISTER_KERNEL_BUILDER( \ - Name("ProdVirialSeR").Device(DEVICE_CPU).TypeConstraint("T"), \ - ProdVirialSeROp); -REGISTER_CPU(float); -REGISTER_CPU(double); - - - diff --git a/source/tests/common.py b/source/tests/common.py index 4b86ef477d..64a10d6cc5 100644 --- a/source/tests/common.py +++ b/source/tests/common.py @@ -1,6 +1,8 @@ import os, sys, dpdata, shutil import numpy as np import pathlib +import collections +import glob from deepmd.env import tf from deepmd.env import GLOBAL_NP_FLOAT_PRECISION @@ -433,3 +435,464 @@ def run_dp(cmd: str) -> int: main(cmds) return 0 + + +# some tests still need this class +class DataSets (object): + """ + Outdated class 
for one data system. + .. deprecated:: 2.0.0 + This class is not maintained any more. + """ + def __init__ (self, + sys_path, + set_prefix, + seed = None, + shuffle_test = True) : + self.dirs = glob.glob (os.path.join(sys_path, set_prefix + ".*")) + self.dirs.sort() + # load atom type + self.atom_type, self.idx_map, self.idx3_map = self.load_type (sys_path) + # load atom type map + self.type_map = self.load_type_map(sys_path) + if self.type_map is not None: + assert(len(self.type_map) >= max(self.atom_type)+1) + # train dirs + self.test_dir = self.dirs[-1] + if len(self.dirs) == 1 : + self.train_dirs = self.dirs + else : + self.train_dirs = self.dirs[:-1] + # check fparam + has_fparam = [ os.path.isfile(os.path.join(ii, 'fparam.npy')) for ii in self.dirs ] + if any(has_fparam) and (not all(has_fparam)) : + raise RuntimeError("system %s: if any set has frame parameter, then all sets should have frame parameter" % sys_path) + if all(has_fparam) : + self.has_fparam = 0 + else : + self.has_fparam = -1 + # check aparam + has_aparam = [ os.path.isfile(os.path.join(ii, 'aparam.npy')) for ii in self.dirs ] + if any(has_aparam) and (not all(has_aparam)) : + raise RuntimeError("system %s: if any set has frame parameter, then all sets should have frame parameter" % sys_path) + if all(has_aparam) : + self.has_aparam = 0 + else : + self.has_aparam = -1 + # energy norm + self.eavg = self.stats_energy() + # load sets + self.set_count = 0 + self.load_batch_set (self.train_dirs[self.set_count % self.get_numb_set()]) + self.load_test_set (self.test_dir, shuffle_test) + + def check_batch_size (self, batch_size) : + for ii in self.train_dirs : + tmpe = np.load(os.path.join(ii, "coord.npy")) + if tmpe.shape[0] < batch_size : + return ii, tmpe.shape[0] + return None + + def check_test_size (self, test_size) : + tmpe = np.load(os.path.join(self.test_dir, "coord.npy")) + if tmpe.shape[0] < test_size : + return self.test_dir, tmpe.shape[0] + else : + return None + + def load_type (self, 
sys_path) : + atom_type = np.loadtxt (os.path.join(sys_path, "type.raw"), dtype=np.int32, ndmin=1) + natoms = atom_type.shape[0] + idx = np.arange (natoms) + idx_map = np.lexsort ((idx, atom_type)) + atom_type3 = np.repeat(atom_type, 3) + idx3 = np.arange (natoms * 3) + idx3_map = np.lexsort ((idx3, atom_type3)) + return atom_type, idx_map, idx3_map + + def load_type_map(self, sys_path) : + fname = os.path.join(sys_path, 'type_map.raw') + if os.path.isfile(fname) : + with open(os.path.join(sys_path, 'type_map.raw')) as fp: + return fp.read().split() + else : + return None + + def get_type_map(self) : + return self.type_map + + def get_numb_set (self) : + return len (self.train_dirs) + + def stats_energy (self) : + eners = np.array([]) + for ii in self.train_dirs: + ener_file = os.path.join(ii, "energy.npy") + if os.path.isfile(ener_file) : + ei = np.load(ener_file) + eners = np.append(eners, ei) + if eners.size == 0 : + return 0 + else : + return np.average(eners) + + def load_energy(self, + set_name, + nframes, + nvalues, + energy_file, + atom_energy_file) : + """ + return : coeff_ener, ener, coeff_atom_ener, atom_ener + """ + # load atom_energy + coeff_atom_ener, atom_ener = self.load_data(set_name, atom_energy_file, [nframes, nvalues], False) + # ignore energy_file + if coeff_atom_ener == 1: + ener = np.sum(atom_ener, axis = 1) + coeff_ener = 1 + # load energy_file + else: + coeff_ener, ener = self.load_data(set_name, energy_file, [nframes], False) + return coeff_ener, ener, coeff_atom_ener, atom_ener + + def load_data(self, set_name, data_name, shape, is_necessary = True): + path = os.path.join(set_name, data_name+".npy") + if os.path.isfile (path) : + data = np.load(path) + data = np.reshape(data, shape) + if is_necessary: + return data + return 1, data + elif is_necessary: + raise OSError("%s not found!" 
% path) + else: + data = np.zeros(shape) + return 0, data + + def load_set(self, set_name, shuffle = True): + data = {} + data["box"] = self.load_data(set_name, "box", [-1, 9]) + nframe = data["box"].shape[0] + data["coord"] = self.load_data(set_name, "coord", [nframe, -1]) + ncoord = data["coord"].shape[1] + if self.has_fparam >= 0: + data["fparam"] = self.load_data(set_name, "fparam", [nframe, -1]) + if self.has_fparam == 0 : + self.has_fparam = data["fparam"].shape[1] + else : + assert self.has_fparam == data["fparam"].shape[1] + if self.has_aparam >= 0: + data["aparam"] = self.load_data(set_name, "aparam", [nframe, -1]) + if self.has_aparam == 0 : + self.has_aparam = data["aparam"].shape[1] // (ncoord//3) + else : + assert self.has_aparam == data["aparam"].shape[1] // (ncoord//3) + data["prop_c"] = np.zeros(5) + data["prop_c"][0], data["energy"], data["prop_c"][3], data["atom_ener"] \ + = self.load_energy (set_name, nframe, ncoord // 3, "energy", "atom_ener") + data["prop_c"][1], data["force"] = self.load_data(set_name, "force", [nframe, ncoord], False) + data["prop_c"][2], data["virial"] = self.load_data(set_name, "virial", [nframe, 9], False) + data["prop_c"][4], data["atom_pref"] = self.load_data(set_name, "atom_pref", [nframe, ncoord//3], False) + data["atom_pref"] = np.repeat(data["atom_pref"], 3, axis=1) + # shuffle data + if shuffle: + idx = np.arange (nframe) + dp_random.shuffle(idx) + for ii in data: + if ii != "prop_c": + data[ii] = data[ii][idx] + data["type"] = np.tile (self.atom_type, (nframe, 1)) + # sort according to type + for ii in ["type", "atom_ener"]: + data[ii] = data[ii][:, self.idx_map] + for ii in ["coord", "force", "atom_pref"]: + data[ii] = data[ii][:, self.idx3_map] + return data + + def load_batch_set (self, + set_name) : + self.batch_set = self.load_set(set_name, True) + self.reset_iter () + + def load_test_set (self, + set_name, + shuffle_test) : + self.test_set = self.load_set(set_name, shuffle_test) + + def reset_iter (self) : + 
self.iterator = 0 + self.set_count += 1 + + def get_set(self, data, idx = None) : + new_data = {} + for ii in data: + dd = data[ii] + if ii == "prop_c": + new_data[ii] = dd.astype(np.float32) + else: + if idx is not None: + dd = dd[idx] + if ii == "type": + new_data[ii] = dd + else: + new_data[ii] = dd.astype(GLOBAL_NP_FLOAT_PRECISION) + return new_data + + def get_test (self) : + """ + returned property prefector [4] in order: + energy, force, virial, atom_ener + """ + return self.get_set(self.test_set) + + def get_batch (self, + batch_size) : + """ + returned property prefector [4] in order: + energy, force, virial, atom_ener + """ + set_size = self.batch_set["energy"].shape[0] + # assert (batch_size <= set_size), "batch size should be no more than set size" + if self.iterator + batch_size > set_size : + self.load_batch_set (self.train_dirs[self.set_count % self.get_numb_set()]) + set_size = self.batch_set["energy"].shape[0] + # print ("%d %d %d" % (self.iterator, self.iterator + batch_size, set_size)) + iterator_1 = self.iterator + batch_size + if iterator_1 >= set_size : + iterator_1 = set_size + idx = np.arange (self.iterator, iterator_1) + self.iterator += batch_size + return self.get_set(self.batch_set, idx) + + def get_natoms (self) : + sample_type = self.batch_set["type"][0] + natoms = len(sample_type) + return natoms + + def get_natoms_2 (self, ntypes) : + sample_type = self.batch_set["type"][0] + natoms = len(sample_type) + natoms_vec = np.zeros (ntypes).astype(int) + for ii in range (ntypes) : + natoms_vec[ii] = np.count_nonzero(sample_type == ii) + return natoms, natoms_vec + + def get_natoms_vec (self, ntypes) : + natoms, natoms_vec = self.get_natoms_2 (ntypes) + tmp = [natoms, natoms] + tmp = np.append (tmp, natoms_vec) + return tmp.astype(np.int32) + + def set_numb_batch (self, + batch_size) : + return self.batch_set["energy"].shape[0] // batch_size + + def get_sys_numb_batch (self, batch_size) : + return self.set_numb_batch(batch_size) * 
self.get_numb_set() + + def get_ener (self) : + return self.eavg + + def numb_fparam(self) : + return self.has_fparam + + def numb_aparam(self) : + return self.has_aparam + + +class DataSystem (object) : + """ + Outdated class for the data systems. + .. deprecated:: 2.0.0 + This class is not maintained any more. + """ + def __init__ (self, + systems, + set_prefix, + batch_size, + test_size, + rcut, + run_opt = None) : + self.system_dirs = systems + self.nsystems = len(self.system_dirs) + self.batch_size = batch_size + if isinstance(self.batch_size, int) : + self.batch_size = self.batch_size * np.ones(self.nsystems, dtype=int) + assert(isinstance(self.batch_size, (list,np.ndarray))) + assert(len(self.batch_size) == self.nsystems) + self.data_systems = [] + self.ntypes = [] + self.natoms = [] + self.natoms_vec = [] + self.nbatches = [] + for ii in self.system_dirs : + self.data_systems.append(DataSets(ii, set_prefix)) + sys_all_types = np.loadtxt(os.path.join(ii, "type.raw")).astype(int) + self.ntypes.append(np.max(sys_all_types) + 1) + self.sys_ntypes = max(self.ntypes) + type_map = [] + for ii in range(self.nsystems) : + self.natoms.append(self.data_systems[ii].get_natoms()) + self.natoms_vec.append(self.data_systems[ii].get_natoms_vec(self.sys_ntypes).astype(int)) + self.nbatches.append(self.data_systems[ii].get_sys_numb_batch(self.batch_size[ii])) + type_map.append(self.data_systems[ii].get_type_map()) + self.type_map = self.check_type_map_consistency(type_map) + + # check frame parameters + has_fparam = [ii.numb_fparam() for ii in self.data_systems] + for ii in has_fparam : + if ii != has_fparam[0] : + raise RuntimeError("if any system has frame parameter, then all systems should have the same number of frame parameter") + self.has_fparam = has_fparam[0] + + # check the size of data if they satisfy the requirement of batch and test + for ii in range(self.nsystems) : + chk_ret = self.data_systems[ii].check_batch_size(self.batch_size[ii]) + if chk_ret is not None 
: + raise RuntimeError ("system %s required batch size %d is larger than the size %d of the dataset %s" % \ + (self.system_dirs[ii], self.batch_size[ii], chk_ret[1], chk_ret[0])) + chk_ret = self.data_systems[ii].check_test_size(test_size) + if chk_ret is not None : + print("WARNNING: system %s required test size %d is larger than the size %d of the dataset %s" % \ + (self.system_dirs[ii], test_size, chk_ret[1], chk_ret[0])) + + if run_opt is not None: + self.print_summary(run_opt) + + self.prob_nbatches = [ float(i) for i in self.nbatches] / np.sum(self.nbatches) + + self.test_data = collections.defaultdict(list) + self.default_mesh = [] + for ii in range(self.nsystems) : + test_system_data = self.data_systems[ii].get_test () + for nn in test_system_data: + self.test_data[nn].append(test_system_data[nn]) + cell_size = np.max (rcut) + avg_box = np.average (test_system_data["box"], axis = 0) + avg_box = np.reshape (avg_box, [3,3]) + ncell = (np.linalg.norm(avg_box, axis=1)/ cell_size).astype(np.int32) + ncell[ncell < 2] = 2 + default_mesh = np.zeros (6, dtype = np.int32) + default_mesh[3:6] = ncell + self.default_mesh.append(default_mesh) + self.pick_idx = 0 + + + def check_type_map_consistency(self, type_map_list): + ret = [] + for ii in type_map_list: + if ii is not None: + min_len = min([len(ii), len(ret)]) + for idx in range(min_len) : + if ii[idx] != ret[idx] : + raise RuntimeError('inconsistent type map: %s %s' % (str(ret), str(ii))) + if len(ii) > len(ret) : + ret = ii + return ret + + + def get_type_map(self): + return self.type_map + + + def format_name_length(self, name, width) : + if len(name) <= width: + return '{: >{}}'.format(name, width) + else : + name = name[-(width-3):] + name = '-- ' + name + return name + + def print_summary(self) : + tmp_msg = "" + # width 65 + sys_width = 42 + tmp_msg += "---Summary of DataSystem-----------------------------------------\n" + tmp_msg += "find %d system(s):\n" % self.nsystems + tmp_msg += "%s " % 
self.format_name_length('system', sys_width) + tmp_msg += "%s %s %s\n" % ('natoms', 'bch_sz', 'n_bch') + for ii in range(self.nsystems) : + tmp_msg += ("%s %6d %6d %5d\n" % + (self.format_name_length(self.system_dirs[ii], sys_width), + self.natoms[ii], + self.batch_size[ii], + self.nbatches[ii]) ) + tmp_msg += "-----------------------------------------------------------------\n" + #log.info(tmp_msg) + + def compute_energy_shift(self) : + sys_ener = np.array([]) + for ss in self.data_systems : + sys_ener = np.append(sys_ener, ss.get_ener()) + sys_tynatom = np.array(self.natoms_vec, dtype = float) + sys_tynatom = np.reshape(sys_tynatom, [self.nsystems,-1]) + sys_tynatom = sys_tynatom[:,2:] + energy_shift,resd,rank,s_value \ + = np.linalg.lstsq(sys_tynatom, sys_ener, rcond = 1e-3) + return energy_shift + + def process_sys_weights(self, sys_weights) : + sys_weights = np.array(sys_weights) + type_filter = sys_weights >= 0 + assigned_sum_prob = np.sum(type_filter * sys_weights) + assert assigned_sum_prob <= 1, "the sum of assigned probability should be less than 1" + rest_sum_prob = 1. 
- assigned_sum_prob + rest_nbatch = (1 - type_filter) * self.nbatches + rest_prob = rest_sum_prob * rest_nbatch / np.sum(rest_nbatch) + ret_prob = rest_prob + type_filter * sys_weights + assert np.sum(ret_prob) == 1, "sum of probs should be 1" + return ret_prob + + def get_batch (self, + sys_idx = None, + sys_weights = None, + style = "prob_sys_size") : + if sys_idx is not None : + self.pick_idx = sys_idx + else : + if sys_weights is None : + if style == "prob_sys_size" : + prob = self.prob_nbatches + elif style == "prob_uniform" : + prob = None + else : + raise RuntimeError("unknown get_batch style") + else : + prob = self.process_sys_weights(sys_weights) + self.pick_idx = dp_random.choice(np.arange(self.nsystems), p=prob) + b_data = self.data_systems[self.pick_idx].get_batch(self.batch_size[self.pick_idx]) + b_data["natoms_vec"] = self.natoms_vec[self.pick_idx] + b_data["default_mesh"] = self.default_mesh[self.pick_idx] + return b_data + + def get_test (self, + sys_idx = None) : + if sys_idx is not None : + idx = sys_idx + else : + idx = self.pick_idx + test_system_data = {} + for nn in self.test_data: + test_system_data[nn] = self.test_data[nn][idx] + test_system_data["natoms_vec"] = self.natoms_vec[idx] + test_system_data["default_mesh"] = self.default_mesh[idx] + return test_system_data + + def get_nbatches (self) : + return self.nbatches + + def get_ntypes (self) : + return self.sys_ntypes + + def get_nsystems (self) : + return self.nsystems + + def get_sys (self, sys_idx) : + return self.data_systems[sys_idx] + + def get_batch_size(self) : + return self.batch_size + + def numb_fparam(self) : + return self.has_fparam diff --git a/source/tests/test_class_arg.py b/source/tests/test_class_arg.py deleted file mode 100644 index e573dff0bd..0000000000 --- a/source/tests/test_class_arg.py +++ /dev/null @@ -1,87 +0,0 @@ -import os,sys -import numpy as np -import unittest - -from deepmd.common import ClassArg - -class TestClassArg (unittest.TestCase) : - def test_add 
(self) : - ca = ClassArg().add('test', int) - test_dict = {'test' : 10, - 'test1' : 20} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test':10}) - - def test_add_multi (self) : - ca = ClassArg()\ - .add('test', int)\ - .add('test1', str) - test_dict = {'test' : 10, - 'test1' : 'foo'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test1':'foo', 'test':10}) - - def test_add_multi_types (self) : - ca = ClassArg()\ - .add('test', [str, list])\ - .add('test1', [str, list]) - test_dict = {'test' : [10,20], 'test1' : 10} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test':[10,20], 'test1':'10'}) - - def test_add_type_cvt (self) : - ca = ClassArg().add('test', float) - test_dict = {'test' : '10'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test':10.0}) - - def test_add_wrong_type_cvt (self) : - ca = ClassArg().add('test', list) - test_dict = {'test' : 10} - with self.assertRaises(TypeError): - ca.parse(test_dict) - - def test_add_alias (self) : - ca = ClassArg().add('test', str, alias = ['test1', 'test2']) - test_dict = {'test2' : 'foo'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test': 'foo'}) - - def test_add_default (self) : - ca = ClassArg().add('test', str, alias = ['test1', 'test2'], default = 'bar') - test_dict = {'test3' : 'foo'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test': 'bar'}) - - def test_add_default_overwrite (self) : - ca = ClassArg().add('test', str, alias = ['test1', 'test2'], default = 'bar') - test_dict = {'test2' : 'foo'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test': 'foo'}) - - def test_add_must (self) : - ca = ClassArg().add('test', str, must = True) - test_dict = {'test2' : 'foo'} - with self.assertRaises(RuntimeError): - ca.parse(test_dict) - - def test_add_none (self) : - ca = ClassArg().add('test', int) - test_dict = {'test2' : 'foo'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test': None}) - - def test_multi_add (self) : - ca = 
ClassArg().add('test', int) - test_dict = {'test2' : 'foo'} - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test': None}) - ca.add('test2', str) - ca.parse(test_dict) - self.assertEqual(ca.get_dict(), {'test': None, 'test2':'foo'}) - - - - -if __name__ == '__main__': - unittest.main() diff --git a/source/tests/test_descrpt_se_a_type.py b/source/tests/test_descrpt_se_a_type.py index f65338d0a4..f61b25f8c6 100644 --- a/source/tests/test_descrpt_se_a_type.py +++ b/source/tests/test_descrpt_se_a_type.py @@ -5,7 +5,7 @@ import pickle from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_descrpt_se_atten.py b/source/tests/test_descrpt_se_atten.py index 777ba0e6bc..b60b5de1c8 100644 --- a/source/tests/test_descrpt_se_atten.py +++ b/source/tests/test_descrpt_se_atten.py @@ -4,7 +4,7 @@ import pickle from common import Data, gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeAtten from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_dipole_se_a.py b/source/tests/test_dipole_se_a.py index 42a615300a..8f06efdfeb 100644 --- a/source/tests/test_dipole_se_a.py +++ b/source/tests/test_dipole_se_a.py @@ -4,7 +4,7 @@ from common import Data,gen_data, j_loader from common import finite_difference, strerch_box -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import DipoleFittingSeA from deepmd.model import DipoleModel diff --git a/source/tests/test_dipole_se_a_tebd.py b/source/tests/test_dipole_se_a_tebd.py index 35a143398f..830528d007 100644 --- a/source/tests/test_dipole_se_a_tebd.py +++ b/source/tests/test_dipole_se_a_tebd.py @@ -4,7 +4,7 @@ 
from common import Data, gen_data, j_loader from common import finite_difference, strerch_box -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import DipoleFittingSeA from deepmd.utils.type_embed import TypeEmbedNet diff --git a/source/tests/test_fitting_ener_type.py b/source/tests/test_fitting_ener_type.py index 5ccb4797ab..0451460917 100644 --- a/source/tests/test_fitting_ener_type.py +++ b/source/tests/test_fitting_ener_type.py @@ -5,7 +5,7 @@ import pickle from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_loc_frame.py b/source/tests/test_model_loc_frame.py index 5e0399fbb7..e37354778d 100644 --- a/source/tests/test_model_loc_frame.py +++ b/source/tests/test_model_loc_frame.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptLocFrame from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_multi.py b/source/tests/test_model_multi.py index 4c9d53d450..a9a043d934 100644 --- a/source/tests/test_model_multi.py +++ b/source/tests/test_model_multi.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data, gen_data, del_data, j_loader, finite_difference, strerch_box -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting, DipoleFittingSeA from deepmd.model import MultiModel diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index 0dce8f1b28..8180a0c4bd 100644 --- a/source/tests/test_model_se_a.py +++ 
b/source/tests/test_model_se_a.py @@ -4,7 +4,7 @@ from deepmd.env import tf from common import Data, gen_data, del_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_a_aparam.py b/source/tests/test_model_se_a_aparam.py index f9a6817092..d2269ac642 100644 --- a/source/tests/test_model_se_a_aparam.py +++ b/source/tests/test_model_se_a_aparam.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_a_fparam.py b/source/tests/test_model_se_a_fparam.py index 469a5c429b..b940671b87 100644 --- a/source/tests/test_model_se_a_fparam.py +++ b/source/tests/test_model_se_a_fparam.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_a_srtab.py b/source/tests/test_model_se_a_srtab.py index acfe62ce1e..855503691d 100644 --- a/source/tests/test_model_se_a_srtab.py +++ b/source/tests/test_model_se_a_srtab.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_a_type.py b/source/tests/test_model_se_a_type.py index 6e80839d75..18f40eaa16 100644 --- a/source/tests/test_model_se_a_type.py +++ 
b/source/tests/test_model_se_a_type.py @@ -4,7 +4,7 @@ import pickle from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_atten.py b/source/tests/test_model_se_atten.py index d8bce857b0..e284186855 100644 --- a/source/tests/test_model_se_atten.py +++ b/source/tests/test_model_se_atten.py @@ -4,7 +4,7 @@ import pickle from common import Data, gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeAtten from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_r.py b/source/tests/test_model_se_r.py index 56052db8d3..52aad18fc5 100644 --- a/source/tests/test_model_se_r.py +++ b/source/tests/test_model_se_r.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeR from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_model_se_t.py b/source/tests/test_model_se_t.py index dead21c2d0..d33b9a6658 100644 --- a/source/tests/test_model_se_t.py +++ b/source/tests/test_model_se_t.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import Data,gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeT from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_nvnmd_se_a.py b/source/tests/test_nvnmd_se_a.py index bc5cb34c98..7346975b63 100644 --- a/source/tests/test_nvnmd_se_a.py +++ b/source/tests/test_nvnmd_se_a.py @@ -5,7 +5,7 @@ import pickle from common import Data, gen_data, j_loader -from 
deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import EnerFitting from deepmd.model import EnerModel diff --git a/source/tests/test_polar_se_a.py b/source/tests/test_polar_se_a.py index ee298bfb8b..fe7a5f2288 100644 --- a/source/tests/test_polar_se_a.py +++ b/source/tests/test_polar_se_a.py @@ -4,7 +4,7 @@ from common import Data,gen_data, j_loader from common import finite_difference, strerch_box -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import PolarFittingSeA from deepmd.model import PolarModel diff --git a/source/tests/test_polar_se_a_tebd.py b/source/tests/test_polar_se_a_tebd.py index 10449409b1..a8348a8795 100644 --- a/source/tests/test_polar_se_a_tebd.py +++ b/source/tests/test_polar_se_a_tebd.py @@ -4,7 +4,7 @@ from common import Data, gen_data, j_loader from common import finite_difference, strerch_box -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import DescrptSeA from deepmd.fit import PolarFittingSeA from deepmd.utils.type_embed import TypeEmbedNet diff --git a/source/tests/test_type_one_side.py b/source/tests/test_type_one_side.py index aa1548f637..b3dc874a63 100644 --- a/source/tests/test_type_one_side.py +++ b/source/tests/test_type_one_side.py @@ -3,7 +3,7 @@ from deepmd.env import tf from common import gen_data, j_loader -from deepmd.utils.data_system import DataSystem +from common import DataSystem from deepmd.descriptor import Descriptor from deepmd.common import j_must_have diff --git a/source/tests/test_wfc.py b/source/tests/test_wfc.py deleted file mode 100644 index 80f9d48126..0000000000 --- a/source/tests/test_wfc.py +++ /dev/null @@ -1,97 +0,0 @@ -import dpdata,os,sys,unittest -import numpy as np -from deepmd.env import tf -from common import Data,gen_data, j_loader - -from deepmd.utils.data_system import 
DataSystem -from deepmd.descriptor import DescrptLocFrame -from deepmd.fit import WFCFitting -from deepmd.model import WFCModel -from deepmd.common import j_must_have - -GLOBAL_ENER_FLOAT_PRECISION = tf.float64 -GLOBAL_TF_FLOAT_PRECISION = tf.float64 -GLOBAL_NP_FLOAT_PRECISION = np.float64 - -class TestModel(tf.test.TestCase): - def setUp(self) : - gen_data() - - def test_model(self): - jfile = 'wfc.json' - jdata = j_loader(jfile) - - systems = j_must_have(jdata, 'systems') - set_pfx = j_must_have(jdata, 'set_prefix') - batch_size = j_must_have(jdata, 'batch_size') - test_size = j_must_have(jdata, 'numb_test') - batch_size = 1 - test_size = 1 - stop_batch = j_must_have(jdata, 'stop_batch') - rcut = j_must_have (jdata['model']['descriptor'], 'rcut') - - data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None) - - test_data = data.get_test () - numb_test = 1 - - jdata['model']['descriptor'].pop('type', None) - jdata['model']['descriptor'].pop('_comment', None) - descrpt = DescrptLocFrame(**jdata['model']['descriptor']) - jdata['model']['fitting_net']['uniform_seed'] = True - fitting = WFCFitting(jdata['model']['fitting_net'], descrpt) - model = WFCModel(descrpt, fitting) - - input_data = {'coord' : [test_data['coord']], - 'box': [test_data['box']], - 'type': [test_data['type']], - 'natoms_vec' : [test_data['natoms_vec']], - 'default_mesh' : [test_data['default_mesh']], - 'fparam': [test_data['fparam']], - } - model._compute_input_stat(input_data) - - t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') - t_energy = tf.placeholder(GLOBAL_ENER_FLOAT_PRECISION, [None], name='t_energy') - t_force = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_force') - t_virial = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_virial') - t_atom_ener = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_atom_ener') - t_coord = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='i_coord') - t_type = tf.placeholder(tf.int32, 
[None], name='i_type') - t_natoms = tf.placeholder(tf.int32, [model.ntypes+2], name='i_natoms') - t_box = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None, 9], name='i_box') - t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh') - is_training = tf.placeholder(tf.bool) - t_fparam = None - - model_pred \ - = model.build (t_coord, - t_type, - t_natoms, - t_box, - t_mesh, - t_fparam, - suffix = "wfc", - reuse = False) - wfc = model_pred['wfc'] - - feed_dict_test = {t_prop_c: test_data['prop_c'], - t_coord: np.reshape(test_data['coord'] [:numb_test, :], [-1]), - t_box: test_data['box'] [:numb_test, :], - t_type: np.reshape(test_data['type'] [:numb_test, :], [-1]), - t_natoms: test_data['natoms_vec'], - t_mesh: test_data['default_mesh'], - is_training: False} - - sess = self.test_session().__enter__() - sess.run(tf.global_variables_initializer()) - [p] = sess.run([wfc], feed_dict = feed_dict_test) - - p = p.reshape([-1]) - refp = [-9.105016838228578990e-01,7.196284362034099935e-01,-9.548516928185298014e-02,2.764615027095288724e+00,2.661319598995644520e-01,7.579512949131941846e-02,-2.107409067376114997e+00,-1.299080016614967414e-01,-5.962778584850070285e-01,2.913899917663253514e-01,-1.226917174638697094e+00,1.829523069930876655e+00,1.015704024959750873e+00,-1.792333611099589386e-01,5.032898080485321834e-01,1.808561721292949453e-01,2.468863482075112081e+00,-2.566442546384765100e-01,-1.467453783795173994e-01,-1.822963931552128658e+00,5.843600156865462747e-01,-1.493875280832117403e+00,1.693322352814763398e-01,-1.877325443995481624e+00] - - places = 6 - np.testing.assert_almost_equal(p, refp, places) - - -