Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions deepmd/descriptor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from .se_ar import DescrptSeAR
from .se_t import DescrptSeT
from .se_a_ebd import DescrptSeAEbd
from .se_a_type import DescrptSeAEbd_type
from .se_a_ef import DescrptSeAEf
from .se_a_ef import DescrptSeAEfLower
from .loc_frame import DescrptLocFrame
807 changes: 807 additions & 0 deletions deepmd/descriptor/se_a_type.py

Large diffs are not rendered by default.

128 changes: 127 additions & 1 deletion deepmd/fit/ener.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@ def __init__ (self,
seed : int = 1,
atom_ener : List[float] = [],
activation_function : str = 'tanh',
precision : str = 'default'
precision : str = 'default',
share_fitting : bool = False
) -> None:
"""
Constructor
Expand Down Expand Up @@ -86,6 +87,7 @@ def __init__ (self,
self.fitting_activation_fn = get_activation_func(activation_function)
self.fitting_precision = get_precision(precision)
self.trainable = trainable
self.share_fitting = share_fitting
if self.trainable is None:
self.trainable = [True for ii in range(len(self.n_neuron) + 1)]
if type(self.trainable) is bool:
Expand Down Expand Up @@ -366,3 +368,127 @@ def build (self,



def build_share(self,
                inputs: tf.Tensor,
                atype: tf.Tensor,
                type_embedding: tf.Tensor,
                natoms: tf.Tensor,
                input_dict: dict = {},
                reuse: bool = None,
                suffix: str = ''
                ) -> tf.Tensor:
    """
    Build the computational graph for a fitting net shared across all
    atom types.  Instead of one fitting net per type, each atom's
    descriptor is widened with its type-embedding vector and fed through
    a single set of fitting-net weights.

    Parameters
    ----------
    inputs
        The input descriptor.
    atype
        Atom types, used as row indices into ``type_embedding``.
    type_embedding
        Type-embedding matrix; presumably shape [ntypes, embed_dim] —
        only its second dimension is read here.
    natoms
        The number of atoms. This tensor has the length of Ntypes + 2
        natoms[0]: number of local atoms
        natoms[1]: total number of atoms held by this processor
        natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
    input_dict
        Additional dict for inputs (read-only; the mutable default is
        never mutated here).
        if numb_fparam > 0, should have input_dict['fparam']
        if numb_aparam > 0, should have input_dict['aparam']
    reuse
        The weights in the networks should be reused when get the variable.
    suffix
        Name suffix to identify this descriptor

    Returns
    -------
    ener
        The system energy (per-atom energies flattened to 1-D).
    """
    bias_atom_e = self.bias_atom_e
    # Frame/atom parameter statistics must have been computed before build.
    if self.numb_fparam > 0 and (self.fparam_avg is None or self.fparam_inv_std is None):
        raise RuntimeError('No data stat result. one should do data statisitic, before build')
    if self.numb_aparam > 0 and (self.aparam_avg is None or self.aparam_inv_std is None):
        raise RuntimeError('No data stat result. one should do data statisitic, before build')

    # Register fitting attributes (and fparam/aparam normalization
    # constants) in the graph so they are saved with the model.
    with tf.variable_scope('fitting_attr' + suffix, reuse=reuse):
        t_dfparam = tf.constant(self.numb_fparam,
                                name='dfparam',
                                dtype=tf.int32)
        t_daparam = tf.constant(self.numb_aparam,
                                name='daparam',
                                dtype=tf.int32)
        if self.numb_fparam > 0:
            t_fparam_avg = tf.get_variable('t_fparam_avg',
                                           self.numb_fparam,
                                           dtype=GLOBAL_TF_FLOAT_PRECISION,
                                           trainable=False,
                                           initializer=tf.constant_initializer(self.fparam_avg))
            t_fparam_istd = tf.get_variable('t_fparam_istd',
                                            self.numb_fparam,
                                            dtype=GLOBAL_TF_FLOAT_PRECISION,
                                            trainable=False,
                                            initializer=tf.constant_initializer(self.fparam_inv_std))
        if self.numb_aparam > 0:
            t_aparam_avg = tf.get_variable('t_aparam_avg',
                                           self.numb_aparam,
                                           dtype=GLOBAL_TF_FLOAT_PRECISION,
                                           trainable=False,
                                           initializer=tf.constant_initializer(self.aparam_avg))
            t_aparam_istd = tf.get_variable('t_aparam_istd',
                                            self.numb_aparam,
                                            dtype=GLOBAL_TF_FLOAT_PRECISION,
                                            trainable=False,
                                            initializer=tf.constant_initializer(self.aparam_inv_std))

    # Append each atom's type-embedding row to its descriptor so a single
    # (type-agnostic) fitting net can distinguish atom types.
    type_shape = type_embedding.get_shape().as_list()
    atm_embed = tf.nn.embedding_lookup(type_embedding, atype)
    atm_embed = tf.reshape(atm_embed, [-1, type_shape[1]])
    inputs = tf.concat([tf.reshape(inputs, [-1, self.dim_descrpt]), atm_embed], axis=1)
    inputs = tf.cast(tf.reshape(inputs, [-1, (self.dim_descrpt + type_shape[1]) * natoms[0]]),
                     self.fitting_precision)

    if bias_atom_e is not None:
        assert (len(bias_atom_e) == self.ntypes)

    # Normalize frame/atom parameters with the stored statistics.
    if self.numb_fparam > 0:
        fparam = input_dict['fparam']
        fparam = tf.reshape(fparam, [-1, self.numb_fparam])
        fparam = (fparam - t_fparam_avg) * t_fparam_istd
    if self.numb_aparam > 0:
        aparam = input_dict['aparam']
        aparam = tf.reshape(aparam, [-1, self.numb_aparam])
        aparam = (aparam - t_aparam_avg) * t_aparam_istd
        aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]])

    # Per-atom view: one row per atom, descriptor + type embedding.
    layer = tf.reshape(inputs, [-1, self.dim_descrpt + type_shape[1]])
    type_bias_ae = 0
    if self.numb_fparam > 0:
        # Frame parameters are shared by all atoms of a frame: tile them
        # per atom before concatenating to the per-atom rows.
        ext_fparam = tf.tile(fparam, [1, natoms[0]])
        ext_fparam = tf.reshape(ext_fparam, [-1, self.numb_fparam])
        ext_fparam = tf.cast(ext_fparam, self.fitting_precision)
        layer = tf.concat([layer, ext_fparam], axis=1)
    if self.numb_aparam > 0:
        # BUGFIX: the original referenced ext_aparam before assignment
        # (NameError when aparam is enabled).  aparam is already per-atom
        # ([-1, numb_aparam * natoms[0]]); reshape it to one row per atom.
        ext_aparam = tf.reshape(aparam, [-1, self.numb_aparam])
        ext_aparam = tf.cast(ext_aparam, self.fitting_precision)
        layer = tf.concat([layer, ext_aparam], axis=1)

    # Fitting net: resnet-style skip connections when consecutive layers
    # share a width, plain layers otherwise; one shared net for all types.
    for ii in range(0, len(self.n_neuron)):
        if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii - 1]:
            layer += one_layer(layer, self.n_neuron[ii], name='fitting_layer_' + str(ii) + suffix, reuse=reuse, seed=self.seed, use_timestep=self.resnet_dt, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii])
        else:
            layer = one_layer(layer, self.n_neuron[ii], name='fitting_layer_' + str(ii) + suffix, reuse=reuse, seed=self.seed, activation_fn=self.fitting_activation_fn, precision=self.fitting_precision, trainable=self.trainable[ii])
    final_layer = one_layer(layer, 1, activation_fn=None, bavg=type_bias_ae, name='fitting_final_layer_' + suffix, reuse=reuse, seed=self.seed, precision=self.fitting_precision, trainable=self.trainable[-1])

    outs = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[0]])

    if self.tot_ener_zero:
        # Shift per-frame energies so every frame sums to force_tot_ener.
        force_tot_ener = 0.0
        outs = tf.reshape(outs, [-1, natoms[0]])
        outs_mean = tf.reshape(tf.reduce_mean(outs, axis=1), [-1, 1])
        outs_mean = outs_mean - tf.ones_like(outs_mean, dtype=GLOBAL_TF_FLOAT_PRECISION) * (force_tot_ener / global_cvt_2_tf_float(natoms[0]))
        outs = outs - outs_mean
        outs = tf.reshape(outs, [-1])

    tf.summary.histogram('fitting_net_output', outs)
    return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)
24 changes: 18 additions & 6 deletions deepmd/model/ener.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from deepmd.env import global_cvt_2_ener_float, MODEL_VERSION
from deepmd.env import op_module
from .model_stat import make_stat_input, merge_sys_stat
from deepmd.descriptor import DescrptSeAEbd_type

class EnerModel() :
model_type = 'ener'
Expand Down Expand Up @@ -138,28 +139,39 @@ def build (self,
coord = tf.reshape (coord_, [-1, natoms[1] * 3])
atype = tf.reshape (atype_, [-1, natoms[1]])


dout \
= self.descrpt.build(coord_,
atype_,
natoms,
box,
mesh,
input_dict,
suffix = suffix,
reuse = reuse)
reuse = reuse,
suffix = suffix)
dout = tf.identity(dout, name='o_descriptor')

if isinstance(self.descrpt,DescrptSeAEbd_type):
type_embedding = self.descrpt.type_embed_net.fetch_type_embedding()
type_embedding = tf.identity(type_embedding,name ='t_embed')
if self.srtab is not None :
nlist, rij, sel_a, sel_r = self.descrpt.get_nlist()
nnei_a = np.cumsum(sel_a)[-1]
nnei_r = np.cumsum(sel_r)[-1]

atom_ener = self.fitting.build (dout,

if self.fitting.share_fitting:
atom_ener = self.fitting.build_share(dout,
atype_,
type_embedding,
natoms,
input_dict,
reuse = reuse,
suffix = suffix)

else:
atom_ener = self.fitting.build (dout,
natoms,
input_dict,
reuse = reuse,
suffix = suffix)
if self.srtab is not None :
sw_lambda, sw_deriv \
= op_module.soft_min_switch(atype,
Expand Down
6 changes: 4 additions & 2 deletions deepmd/train/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from deepmd.descriptor import DescrptLocFrame
from deepmd.descriptor import DescrptSeA
from deepmd.descriptor import DescrptSeT
from deepmd.descriptor import DescrptSeAEbd
from deepmd.descriptor import DescrptSeAEbd,DescrptSeAEbd_type
from deepmd.descriptor import DescrptSeAEf
from deepmd.descriptor import DescrptSeR
from deepmd.descriptor import DescrptSeAR
Expand Down Expand Up @@ -61,8 +61,10 @@ def _generate_descrpt_from_param_dict(descrpt_param):
descrpt = DescrptSeR(**descrpt_param)
elif descrpt_type == 'se_e3' or descrpt_type == 'se_at' or descrpt_type == 'se_a_3be' :
descrpt = DescrptSeT(**descrpt_param)
elif descrpt_type == 'se_a_tpe' or descrpt_type == 'se_a_ebd' :
elif descrpt_type == 'se_a_tpe' :
descrpt = DescrptSeAEbd(**descrpt_param)
elif descrpt_type == 'se_a_ebd':
descrpt = DescrptSeAEbd_type(**descrpt_param)
elif descrpt_type == 'se_a_ef' :
descrpt = DescrptSeAEf(**descrpt_param)
elif descrpt_type == 'se_ar' :
Expand Down
39 changes: 38 additions & 1 deletion deepmd/utils/argcheck.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,38 @@ def descrpt_se_a_tpe_args():
Argument("numb_aparam", int, optional = True, default = 0, doc = doc_numb_aparam)
]

def descrpt_se_a_ebd_args():
    """Return the argument definitions for the `se_a_ebd` descriptor."""
    doc = {
        "sel": 'A list of integers. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius.',
        "rcut": 'The cut-off radius.',
        "rcut_smth": 'Where to start smoothing. For example the 1/r term is smoothed from `rcut` to `rcut_smth`',
        "neuron": 'Number of neurons in each hidden layers of the embedding net. When two layers are of the same size or one layer is twice as large as the previous layer, a skip connection is built.',
        "axis_neuron": 'Size of the submatrix of G (embedding matrix).',
        "type_filter": 'Size of type embed net',
        "activation_function": f'The activation function in the embedding net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())}',
        "resnet_dt": 'Whether to use a "Timestep" in the skip connection',
        "type_one_side": 'Try to build N_types embedding nets. Otherwise, building N_types^2 embedding nets',
        "precision": f'The precision of the embedding net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())}',
        "trainable": 'If the parameters in the embedding net is trainable',
        "seed": 'Random seed for parameter initialization',
        "exclude_types": 'The Excluded types',
        "set_davg_zero": 'Set the normalization average to zero. This option should be set when `atom_ener` in the energy fitting is used',
    }

    # (name, type, optional, default) — None default means "no default".
    args = [
        Argument("sel", list, optional=False, doc=doc["sel"]),
        Argument("rcut", float, optional=True, default=6.0, doc=doc["rcut"]),
        Argument("rcut_smth", float, optional=True, default=0.5, doc=doc["rcut_smth"]),
        Argument("neuron", list, optional=True, default=[10, 20, 40], doc=doc["neuron"]),
        Argument("axis_neuron", int, optional=True, default=4, doc=doc["axis_neuron"]),
        Argument("activation_function", str, optional=True, default='tanh', doc=doc["activation_function"]),
        Argument("resnet_dt", bool, optional=True, default=False, doc=doc["resnet_dt"]),
        Argument("type_one_side", bool, optional=True, default=False, doc=doc["type_one_side"]),
        Argument("type_filter", list, optional=True, default=[5, 10, 10], doc=doc["type_filter"]),
        Argument("precision", str, optional=True, default="float64", doc=doc["precision"]),
        Argument("trainable", bool, optional=True, default=True, doc=doc["trainable"]),
        Argument("seed", [int, None], optional=True, doc=doc["seed"]),
        Argument("exclude_types", list, optional=True, default=[], doc=doc["exclude_types"]),
        Argument("set_davg_zero", bool, optional=True, default=False, doc=doc["set_davg_zero"]),
    ]
    return args

def descrpt_se_r_args():
doc_sel = 'A list of integers. The length of the list should be the same as the number of atom types in the system. `sel[i]` gives the selected number of type-i neighbors. `sel[i]` is recommended to be larger than the maximally possible number of type-i neighbors in the cut-off radius.'
Expand Down Expand Up @@ -166,21 +198,24 @@ def descrpt_variant_type_args():
link_se_e2_r = make_link('se_e2_r', 'model/descriptor[se_e2_r]')
link_se_e3 = make_link('se_e3', 'model/descriptor[se_e3]')
link_se_a_tpe = make_link('se_a_tpe', 'model/descriptor[se_a_tpe]')
link_se_a_ebd = make_link('se_a_ebd', 'model/descriptor[se_a_ebd]')
link_hybrid = make_link('hybrid', 'model/descriptor[hybrid]')
doc_descrpt_type = f'The type of the descritpor. See explanation below. \n\n\
- `loc_frame`: Defines a local frame at each atom, and the compute the descriptor as local coordinates under this frame.\n\n\
- `se_e2_a`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor.\n\n\
- `se_e2_r`: Used by the smooth edition of Deep Potential. Only the distance between atoms is used to construct the descriptor.\n\n\
- `se_e3`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Three-body embedding will be used by this descriptor.\n\n\
- `se_a_tpe`: Used by the smooth edition of Deep Potential. The full relative coordinates are used to construct the descriptor. Type embedding will be used by this descriptor.\n\n\
- `se_a_ebd`: Type embedding will be used by this descriptor.\n\n\
- `hybrid`: Concatenate of a list of descriptors as a new descriptor.'

return Variant("type", [
Argument("loc_frame", dict, descrpt_local_frame_args()),
Argument("se_e2_a", dict, descrpt_se_a_args(), alias = ['se_a']),
Argument("se_e2_r", dict, descrpt_se_r_args(), alias = ['se_r']),
Argument("se_e3", dict, descrpt_se_t_args(), alias = ['se_at', 'se_a_3be', 'se_t']),
Argument("se_a_tpe", dict, descrpt_se_a_tpe_args(), alias = ['se_a_ebd']),
Argument("se_a_tpe", dict, descrpt_se_a_tpe_args()),
Argument("se_a_ebd", dict, descrpt_se_a_ebd_args(), alias = ['se_a_ebd_type']),
Argument("hybrid", dict, descrpt_hybrid_args()),
], doc = doc_descrpt_type)

Expand All @@ -193,6 +228,7 @@ def fitting_ener():
doc_activation_function = f'The activation function in the fitting net. Supported activation functions are {list_to_doc(ACTIVATION_FN_DICT.keys())}'
doc_precision = f'The precision of the fitting net parameters, supported options are {list_to_doc(PRECISION_DICT.keys())}'
doc_resnet_dt = 'Whether to use a "Timestep" in the skip connection'
doc_share_fitting = 'Whether to share a single fitting net'
doc_trainable = 'Whether the parameters in the fitting net are trainable. This option can be\n\n\
- bool: True if all parameters of the fitting net are trainable, False otherwise.\n\n\
- list of bool: Specifies if each layer is trainable. Since the fitting net is composed by hidden layers followed by a output layer, the length of tihs list should be equal to len(`neuron`)+1.'
Expand All @@ -207,6 +243,7 @@ def fitting_ener():
Argument("activation_function", str, optional = True, default = 'tanh', doc = doc_activation_function),
Argument("precision", str, optional = True, default = 'float64', doc = doc_precision),
Argument("resnet_dt", bool, optional = True, default = True, doc = doc_resnet_dt),
Argument("share_fitting", bool, optional = True, default = False, doc = doc_share_fitting),
Argument("trainable", [list,bool], optional = True, default = True, doc = doc_trainable),
Argument("rcond", float, optional = True, default = 1e-3, doc = doc_rcond),
Argument("seed", [int,None], optional = True, doc = doc_seed),
Expand Down
Loading