From 786a7e99b23e738f077cf1171027dff9253e126c Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 9 Dec 2022 15:04:47 -0500 Subject: [PATCH 1/3] store energy bias with interface precision Signed-off-by: Jinzhe Zeng --- deepmd/fit/ener.py | 45 +++++++++++++++++++------------------------ deepmd/model/ener.py | 2 +- deepmd/model/multi.py | 2 +- 3 files changed, 22 insertions(+), 27 deletions(-) diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py index ebac8dbb07..a1008d4e49 100644 --- a/deepmd/fit/ener.py +++ b/deepmd/fit/ener.py @@ -140,7 +140,7 @@ def __init__ (self, self.atom_ener_v = atom_ener for at, ae in enumerate(atom_ener): if ae is not None: - self.atom_ener.append(tf.constant(ae, self.fitting_precision, name = "atom_%d_ener" % at)) + self.atom_ener.append(tf.constant(ae, GLOBAL_TF_FLOAT_PRECISION, name = "atom_%d_ener" % at)) else: self.atom_ener.append(None) self.useBN = False @@ -286,6 +286,7 @@ def compute_input_stats(self, def _compute_std (self, sumv2, sumv, sumn) : return np.sqrt(sumv2/sumn - np.multiply(sumv/sumn, sumv/sumn)) + @cast_precision def _build_lower( self, start_index, @@ -368,7 +369,6 @@ def _build_lower( return final_layer - @cast_precision def build (self, inputs : tf.Tensor, natoms : tf.Tensor, @@ -425,12 +425,11 @@ def build (self, t_daparam = tf.constant(self.numb_aparam, name = 'daparam', dtype = tf.int32) - if type_embedding is not None: - self.t_bias_atom_e = tf.get_variable('t_bias_atom_e', - self.bias_atom_e.shape, - dtype=self.fitting_precision, - trainable=False, - initializer=tf.constant_initializer(self.bias_atom_e)) + self.t_bias_atom_e = tf.get_variable('t_bias_atom_e', + self.bias_atom_e.shape, + dtype=GLOBAL_TF_FLOAT_PRECISION, + trainable=False, + initializer=tf.constant_initializer(self.bias_atom_e)) if self.numb_fparam > 0: t_fparam_avg = tf.get_variable('t_fparam_avg', self.numb_fparam, @@ -460,9 +459,9 @@ def build (self, nframes = input_dict.get('nframes') if nframes is not None: # like inputs, but we don't want 
to add a dependency on inputs - inputs_zero = tf.zeros((nframes, natoms[0], self.dim_descrpt), dtype=self.fitting_precision) + inputs_zero = tf.zeros((nframes, natoms[0], self.dim_descrpt), dtype=GLOBAL_TF_FLOAT_PRECISION) else: - inputs_zero = tf.zeros_like(inputs, dtype=self.fitting_precision) + inputs_zero = tf.zeros_like(inputs, dtype=GLOBAL_TF_FLOAT_PRECISION) if bias_atom_e is not None : @@ -480,9 +479,9 @@ def build (self, aparam = (aparam - t_aparam_avg) * t_aparam_istd aparam = tf.reshape(aparam, [-1, self.numb_aparam * natoms[0]]) + atype_nall = tf.reshape(atype, [-1, natoms[1]]) + self.atype_nloc = tf.reshape(tf.slice(atype_nall, [0, 0], [-1, natoms[0]]), [-1]) ## lammps will make error if type_embedding is not None: - atype_nall = tf.reshape(atype, [-1, natoms[1]]) - self.atype_nloc = tf.reshape(tf.slice(atype_nall, [0, 0], [-1, natoms[0]]), [-1]) ## lammps will make error atype_embed = tf.nn.embedding_lookup(type_embedding, self.atype_nloc) else: atype_embed = None @@ -493,23 +492,19 @@ def build (self, start_index = 0 outs_list = [] for type_i in range(self.ntypes): - if bias_atom_e is None : - type_bias_ae = 0.0 - else : - type_bias_ae = bias_atom_e[type_i] final_layer = self._build_lower( start_index, natoms[2+type_i], inputs, fparam, aparam, - bias_atom_e=type_bias_ae, suffix='_type_'+str(type_i)+suffix, reuse=reuse + bias_atom_e=0., suffix='_type_'+str(type_i)+suffix, reuse=reuse ) # concat the results if type_i < len(self.atom_ener) and self.atom_ener[type_i] is not None: zero_layer = self._build_lower( start_index, natoms[2+type_i], inputs_zero, fparam, aparam, - bias_atom_e=type_bias_ae, suffix='_type_'+str(type_i)+suffix, reuse=True + bias_atom_e=0., suffix='_type_'+str(type_i)+suffix, reuse=True ) - final_layer += self.atom_ener[type_i] - zero_layer + final_layer -= zero_layer final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i]]) outs_list.append(final_layer) start_index += natoms[2+type_i] @@ -518,7 +513,7 @@ def 
build (self, outs = tf.concat(outs_list, axis = 1) # with type embedding else: - atype_embed = tf.cast(atype_embed, self.fitting_precision) + atype_embed = tf.cast(atype_embed, GLOBAL_TF_FLOAT_PRECISION) type_shape = atype_embed.get_shape().as_list() inputs = tf.concat( [tf.reshape(inputs,[-1,self.dim_descrpt]),atype_embed], @@ -547,11 +542,11 @@ def build (self, # atomic energy will be stored in `self.t_bias_atom_e` which is not trainable final_layer -= zero_layer outs = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[0]]) - # add bias - self.atom_ener_before = outs - self.add_type = tf.reshape(tf.nn.embedding_lookup(self.t_bias_atom_e, self.atype_nloc), [tf.shape(inputs)[0], natoms[0]]) - outs = outs + self.add_type - self.atom_ener_after = outs + # add bias + self.atom_ener_before = outs + self.add_type = tf.reshape(tf.nn.embedding_lookup(self.t_bias_atom_e, self.atype_nloc), [tf.shape(inputs)[0], natoms[0]]) + outs = outs + self.add_type + self.atom_ener_after = outs if self.tot_ener_zero: force_tot_ener = 0.0 diff --git a/deepmd/model/ener.py b/deepmd/model/ener.py index 170c715fad..b07121e30b 100644 --- a/deepmd/model/ener.py +++ b/deepmd/model/ener.py @@ -170,7 +170,7 @@ def build (self, suffix = suffix, ) input_dict['type_embedding'] = type_embedding - input_dict['atype'] = atype_ + input_dict['atype'] = atype_ if frz_model == None: dout \ diff --git a/deepmd/model/multi.py b/deepmd/model/multi.py index ff041c9dfb..5d4cae258c 100644 --- a/deepmd/model/multi.py +++ b/deepmd/model/multi.py @@ -195,7 +195,7 @@ def build(self, suffix=suffix, ) input_dict['type_embedding'] = type_embedding - input_dict['atype'] = atype_ + input_dict['atype'] = atype_ dout \ = self.descrpt.build(coord_, From 721c8999dd61edbccc269463fc4802af9fb0c2d2 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Fri, 9 Dec 2022 15:49:57 -0500 Subject: [PATCH 2/3] fix tests Signed-off-by: Jinzhe Zeng --- source/tests/test_model_se_a.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index d7227bd54e..28caed4fd9 100644 --- a/source/tests/test_model_se_a.py +++ b/source/tests/test_model_se_a.py @@ -69,7 +69,7 @@ def test_model_atom_ener(self): 'default_mesh' : [test_data['default_mesh']] } model._compute_input_stat(input_data) - model.descrpt.bias_atom_e = data.compute_energy_shift() + model.fitting.bias_atom_e = np.array(set_atom_ener) t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') t_energy = tf.placeholder(GLOBAL_ENER_FLOAT_PRECISION, [None], name='t_energy') From 706448ff786f7edd7b81b5791a5ce39c573f7cc6 Mon Sep 17 00:00:00 2001 From: Jinzhe Zeng Date: Mon, 12 Dec 2022 16:15:56 -0500 Subject: [PATCH 3/3] revise the comment Signed-off-by: Jinzhe Zeng --- deepmd/fit/ener.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py index a1008d4e49..75c13dc9ab 100644 --- a/deepmd/fit/ener.py +++ b/deepmd/fit/ener.py @@ -587,7 +587,7 @@ def init_variables(self, try: self.bias_atom_e = get_tensor_by_name_from_graph(graph, 'fitting_attr%s/t_bias_atom_e' % suffix) except GraphWithoutTensorError: - # model without type_embedding has no t_bias_atom_e + # for compatibility, old models have no t_bias_atom_e pass def change_energy_bias(self,