From 93600932f87c01795a2ba88b8589ccab24e0f15b Mon Sep 17 00:00:00 2001 From: Han Wang Date: Tue, 1 Jun 2021 21:17:37 +0800 Subject: [PATCH 1/9] change default seed of constructors to `None` --- deepmd/descriptor/se_a.py | 2 +- deepmd/descriptor/se_a_ebd.py | 2 +- deepmd/descriptor/se_a_ef.py | 4 ++-- deepmd/descriptor/se_r.py | 2 +- deepmd/descriptor/se_t.py | 2 +- deepmd/fit/dipole.py | 2 +- deepmd/fit/ener.py | 2 +- deepmd/fit/polar.py | 4 ++-- deepmd/utils/type_embed.py | 2 +- 9 files changed, 11 insertions(+), 11 deletions(-) diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index aa8b88145c..8f985881cf 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -23,7 +23,7 @@ def __init__ (self, axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, - seed: int = 1, + seed: int = None, type_one_side: bool = True, exclude_types: List[int] = [], set_davg_zero: bool = False, diff --git a/deepmd/descriptor/se_a_ebd.py b/deepmd/descriptor/se_a_ebd.py index 0ea6d0646d..7630cabfc9 100644 --- a/deepmd/descriptor/se_a_ebd.py +++ b/deepmd/descriptor/se_a_ebd.py @@ -20,7 +20,7 @@ def __init__ (self, axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, - seed: int = 1, + seed: int = None, type_one_side: bool = True, type_nchanl : int = 2, type_nlayer : int = 1, diff --git a/deepmd/descriptor/se_a_ef.py b/deepmd/descriptor/se_a_ef.py index e97f011af0..0a288848f4 100644 --- a/deepmd/descriptor/se_a_ef.py +++ b/deepmd/descriptor/se_a_ef.py @@ -20,7 +20,7 @@ def __init__(self, axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, - seed: int = 1, + seed: int = None, type_one_side: bool = True, exclude_types: List[int] = [], set_davg_zero: bool = False, @@ -274,7 +274,7 @@ def __init__ (self, axis_neuron: int = 8, resnet_dt: bool = False, trainable: bool = True, - seed: int = 1, + seed: int = None, type_one_side: bool = True, exclude_types: List[int] = [], set_davg_zero: bool = False, diff --git 
a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index b3f56bf206..1cd0d59909 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -19,7 +19,7 @@ def __init__ (self, neuron: List[int] = [24,48,96], resnet_dt: bool = False, trainable: bool = True, - seed: int = 1, + seed: int = None, type_one_side: bool = True, exclude_types: List[int] = [], set_davg_zero: bool = False, diff --git a/deepmd/descriptor/se_t.py b/deepmd/descriptor/se_t.py index f680d83db6..bf892bed04 100644 --- a/deepmd/descriptor/se_t.py +++ b/deepmd/descriptor/se_t.py @@ -19,7 +19,7 @@ def __init__ (self, neuron: List[int] = [24,48,96], resnet_dt: bool = False, trainable: bool = True, - seed: int = 1, + seed: int = None, set_davg_zero: bool = False, activation_function: str = 'tanh', precision: str = 'default' diff --git a/deepmd/fit/dipole.py b/deepmd/fit/dipole.py index 623f8a782e..630d1bbb3a 100644 --- a/deepmd/fit/dipole.py +++ b/deepmd/fit/dipole.py @@ -21,7 +21,7 @@ def __init__ (self, neuron : List[int] = [120,120,120], resnet_dt : bool = True, sel_type : List[int] = None, - seed : int = 1, + seed : int = None, activation_function : str = 'tanh', precision : str = 'default' ) : diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py index eae9fec738..09318d3e3f 100644 --- a/deepmd/fit/ener.py +++ b/deepmd/fit/ener.py @@ -24,7 +24,7 @@ def __init__ (self, rcond : float = 1e-3, tot_ener_zero : bool = False, trainable : List[bool] = None, - seed : int = 1, + seed : int = None, atom_ener : List[float] = [], activation_function : str = 'tanh', precision : str = 'default' diff --git a/deepmd/fit/polar.py b/deepmd/fit/polar.py index 78e51acee7..ab27244b4b 100644 --- a/deepmd/fit/polar.py +++ b/deepmd/fit/polar.py @@ -112,7 +112,7 @@ def __init__ (self, scale : List[float] = None, shift_diag : bool = True, # YWolfeee: will support the user to decide whether to use this function #diag_shift : List[float] = None, YWolfeee: will not support the user to assign a shift - seed : int 
= 1, + seed : int = None, activation_function : str = 'tanh', precision : str = 'default' ) -> None: @@ -373,7 +373,7 @@ def __init__ (self, fit_diag : bool = True, scale : List[float] = None, diag_shift : List[float] = None, - seed : int = 1, + seed : int = None, activation_function : str = 'tanh', precision : str = 'default' ) -> None: diff --git a/deepmd/utils/type_embed.py b/deepmd/utils/type_embed.py index 55083c6f9d..b61a1318f3 100644 --- a/deepmd/utils/type_embed.py +++ b/deepmd/utils/type_embed.py @@ -63,7 +63,7 @@ def __init__( activation_function: str = 'tanh', precision: str = 'default', trainable: bool = True, - seed: int = 1, + seed: int = None, )->None: """ Constructor From 882fdce771ff5aa4a4f6e8117db1263ec69ea949 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 07:41:01 +0800 Subject: [PATCH 2/9] initialize the parameters in different layers of the embedding net with different seeds --- deepmd/descriptor/se_a.py | 9 +++++++-- deepmd/descriptor/se_r.py | 10 ++++++++-- deepmd/descriptor/se_t.py | 9 +++++++-- deepmd/utils/network.py | 27 +++++++++++++++++++++----- deepmd/utils/type_embed.py | 7 ++++++- source/tests/test_descrpt_se_a_type.py | 6 ++++-- source/tests/test_embedding_net.py | 15 +++++++++----- source/tests/test_model_se_a.py | 4 ++-- source/tests/test_model_se_a_aparam.py | 2 +- source/tests/test_model_se_a_fparam.py | 2 +- source/tests/test_model_se_a_srtab.py | 2 +- source/tests/test_model_se_a_type.py | 5 +++-- source/tests/test_model_se_r.py | 2 +- source/tests/test_model_se_t.py | 2 +- source/tests/test_polar_se_a.py | 2 +- source/tests/test_type_embed.py | 2 +- 16 files changed, 76 insertions(+), 30 deletions(-) diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index 8f985881cf..c6b78012cb 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -28,7 +28,8 @@ def __init__ (self, exclude_types: List[int] = [], set_davg_zero: bool = False, activation_function: str = 'tanh', - precision: str 
= 'default' + precision: str = 'default', + uniform_seed: bool = False ) -> None: """ Constructor @@ -62,6 +63,8 @@ def __init__ (self, The activation function in the embedding net. Supported options are {0} precision The precision of the embedding net parameters. Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ self.sel_a = sel self.rcut_r = rcut @@ -70,6 +73,7 @@ def __init__ (self, self.n_axis_neuron = axis_neuron self.filter_resnet_dt = resnet_dt self.seed = seed + self.uniform_seed = uniform_seed self.trainable = trainable self.filter_activation_fn = get_activation_func(activation_function) self.filter_precision = get_precision(precision) @@ -611,7 +615,8 @@ def _filter_lower( stddev = stddev, bavg = bavg, seed = seed, - trainable = trainable) + trainable = trainable, + uniform_seed = self.uniform_seed) else: w = tf.zeros((outputs_size[0], outputs_size[-1]), dtype=GLOBAL_TF_FLOAT_PRECISION) xyz_scatter = tf.matmul(xyz_scatter, w) diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index 1cd0d59909..bc003731a9 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -24,7 +24,9 @@ def __init__ (self, exclude_types: List[int] = [], set_davg_zero: bool = False, activation_function: str = 'tanh', - precision: str = 'default'): + precision: str = 'default', + uniform_seed: bool = False + ) -> None: """ Constructor @@ -53,6 +55,8 @@ def __init__ (self, The activation function in the embedding net. Supported options are {0} precision The precision of the embedding net parameters. 
Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ # args = ClassArg()\ # .add('sel', list, must = True) \ @@ -74,6 +78,7 @@ def __init__ (self, self.filter_neuron = neuron self.filter_resnet_dt = resnet_dt self.seed = seed + self.uniform_seed = uniform_seed self.trainable = trainable self.filter_activation_fn = get_activation_func(activation_function) self.filter_precision = get_precision(precision) @@ -469,7 +474,8 @@ def _filter_r(self, stddev = stddev, bavg = bavg, seed = seed, - trainable = trainable) + trainable = trainable, + uniform_seed = self.uniform_seed) else: w = tf.zeros((outputs_size[0], outputs_size[-1]), dtype=GLOBAL_TF_FLOAT_PRECISION) xyz_scatter = tf.matmul(xyz_scatter, w) diff --git a/deepmd/descriptor/se_t.py b/deepmd/descriptor/se_t.py index bf892bed04..828a1670ba 100644 --- a/deepmd/descriptor/se_t.py +++ b/deepmd/descriptor/se_t.py @@ -22,7 +22,8 @@ def __init__ (self, seed: int = None, set_davg_zero: bool = False, activation_function: str = 'tanh', - precision: str = 'default' + precision: str = 'default', + uniform_seed: bool = False ) -> None: """ Constructor @@ -50,6 +51,8 @@ def __init__ (self, The activation function in the embedding net. Supported options are {0} precision The precision of the embedding net parameters. 
Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ self.sel_a = sel self.rcut_r = rcut @@ -57,6 +60,7 @@ def __init__ (self, self.filter_neuron = neuron self.filter_resnet_dt = resnet_dt self.seed = seed + self.uniform_seed = uniform_seed self.trainable = trainable self.filter_activation_fn = get_activation_func(activation_function) self.filter_precision = get_precision(precision) @@ -493,7 +497,8 @@ def _filter(self, stddev = stddev, bavg = bavg, seed = seed, - trainable = trainable) + trainable = trainable, + uniform_seed = self.uniform_seed) # with natom x nei_type_i x nei_type_j x out_size ebd_env_ij = tf.reshape(ebd_env_ij, [-1, nei_type_i, nei_type_j, outputs_size[-1]]) # with natom x out_size diff --git a/deepmd/utils/network.py b/deepmd/utils/network.py index a19f0e0d6f..de1162456a 100644 --- a/deepmd/utils/network.py +++ b/deepmd/utils/network.py @@ -55,6 +55,11 @@ def one_layer(inputs, return hidden +def embedding_net_rand_seed_shift( + network_size +): + shift = 3 * (len(network_size) + 1) + return shift def embedding_net(xx, network_size, @@ -65,7 +70,8 @@ def embedding_net(xx, stddev = 1.0, bavg = 0.0, seed = None, - trainable = True): + trainable = True, + uniform_seed = False): """ Parameters ---------- @@ -92,19 +98,26 @@ def embedding_net(xx, """ input_shape = xx.get_shape().as_list() outputs_size = [input_shape[1]] + network_size - + for ii in range(1, len(outputs_size)): w = tf.get_variable('matrix_'+str(ii)+name_suffix, [outputs_size[ii - 1], outputs_size[ii]], precision, - tf.random_normal_initializer(stddev=stddev/np.sqrt(outputs_size[ii]+outputs_size[ii-1]), seed = seed), + tf.random_normal_initializer( + stddev=stddev/np.sqrt(outputs_size[ii]+outputs_size[ii-1]), + seed = seed if (seed is None or uniform_seed) else seed + ii*3+0 + ), trainable = trainable) variable_summaries(w, 'matrix_'+str(ii)+name_suffix) b = 
tf.get_variable('bias_'+str(ii)+name_suffix, [1, outputs_size[ii]], precision, - tf.random_normal_initializer(stddev=stddev, mean = bavg, seed = seed), + tf.random_normal_initializer( + stddev=stddev, + mean = bavg, + seed = seed if (seed is None or uniform_seed) else seed + 3*ii+1 + ), trainable = trainable) variable_summaries(b, 'bias_'+str(ii)+name_suffix) @@ -113,7 +126,11 @@ def embedding_net(xx, idt = tf.get_variable('idt_'+str(ii)+name_suffix, [1, outputs_size[ii]], precision, - tf.random_normal_initializer(stddev=0.001, mean = 1.0, seed = seed), + tf.random_normal_initializer( + stddev=0.001, + mean = 1.0, + seed = seed if (seed is None or uniform_seed) else seed + 3*ii+2 + ), trainable = trainable) variable_summaries(idt, 'idt_'+str(ii)+name_suffix) diff --git a/deepmd/utils/type_embed.py b/deepmd/utils/type_embed.py index b61a1318f3..1c36d522b4 100644 --- a/deepmd/utils/type_embed.py +++ b/deepmd/utils/type_embed.py @@ -64,6 +64,7 @@ def __init__( precision: str = 'default', trainable: bool = True, seed: int = None, + uniform_seed: bool = False, )->None: """ Constructor @@ -82,6 +83,8 @@ def __init__( If the weights of embedding net are trainable. seed Random seed for initializing the network parameters. 
+ uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ self.neuron = neuron self.seed = seed @@ -89,6 +92,7 @@ def __init__( self.filter_precision = get_precision(precision) self.filter_activation_fn = get_activation_func(activation_function) self.trainable = trainable + self.uniform_seed = uniform_seed def build( @@ -129,7 +133,8 @@ def build( precision = self.filter_precision, resnet_dt = self.filter_resnet_dt, seed = self.seed, - trainable = self.trainable) + trainable = self.trainable, + uniform_seed = self.uniform_seed) ebd_type = tf.reshape(ebd_type, [-1, self.neuron[-1]]) # nnei * neuron[-1] self.ebd_type = tf.identity(ebd_type, name ='t_typeebd') return self.ebd_type diff --git a/source/tests/test_descrpt_se_a_type.py b/source/tests/test_descrpt_se_a_type.py index b62d93e514..c35d952157 100644 --- a/source/tests/test_descrpt_se_a_type.py +++ b/source/tests/test_descrpt_se_a_type.py @@ -53,10 +53,11 @@ def test_descriptor_two_sides(self): neuron = typeebd_param['neuron'], resnet_dt = typeebd_param['resnet_dt'], seed = typeebd_param['seed'], + uniform_seed = True ) jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed=True) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) input_data = {'coord' : [test_data['coord']], @@ -161,10 +162,11 @@ def test_descriptor_one_side(self): neuron = typeebd_param['neuron'], resnet_dt = typeebd_param['resnet_dt'], seed = typeebd_param['seed'], + uniform_seed = True ) jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], 
[test_data['default_mesh']]) input_data = {'coord' : [test_data['coord']], diff --git a/source/tests/test_embedding_net.py b/source/tests/test_embedding_net.py index 58cc331001..4c07ea575e 100644 --- a/source/tests/test_embedding_net.py +++ b/source/tests/test_embedding_net.py @@ -25,7 +25,8 @@ def test_enlarger_net(self): network_size, tf.float64, name_suffix = 'enlarger_net', - seed = 1) + seed = 1, + uniform_seed = True) self.sess.run(tf.global_variables_initializer()) myout = self.sess.run(out) refout = [[-0.1482171, -0.14177827, -0.76181204, 0.21266767], @@ -42,7 +43,8 @@ def test_enlarger_net_1(self): network_size, tf.float64, name_suffix = 'enlarger_net_1', - seed = 1) + seed = 1, + uniform_seed = True) self.sess.run(tf.global_variables_initializer()) myout = self.sess.run(out) refout = [[ 0.10842905, -0.61623145, -1.46738788, -0.01921788], @@ -59,7 +61,8 @@ def test_enlarger_net_1_idt(self): tf.float64, name_suffix = 'enlarger_net_1_idt', resnet_dt = True, - seed = 1) + seed = 1, + uniform_seed = True) self.sess.run(tf.global_variables_initializer()) myout = self.sess.run(out) refout = [[ 0.10839754, -0.6161336, -1.46673253, -0.01927138], @@ -75,7 +78,8 @@ def test_enlarger_net_2(self): network_size, tf.float64, name_suffix = 'enlarger_net_2', - seed = 1) + seed = 1, + uniform_seed = True) self.sess.run(tf.global_variables_initializer()) myout = self.sess.run(out) refout = [[ 0.24023149, -0.66311811, -0.50951819, -0.36873654], @@ -93,7 +97,8 @@ def test_enlarger_net_2(self): tf.float64, name_suffix = 'enlarger_net_2_idt', resnet_dt = True, - seed = 1) + seed = 1, + uniform_seed = True) self.sess.run(tf.global_variables_initializer()) myout = self.sess.run(out) refout = [[ 0.2403889, -0.66290763, -0.50883586, -0.36869913], diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index 73ae4c7d1e..d8d4f29f4b 100644 --- a/source/tests/test_model_se_a.py +++ b/source/tests/test_model_se_a.py @@ -54,7 +54,7 @@ def 
test_model_atom_ener(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed=True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) model = EnerModel(descrpt, fitting) @@ -135,7 +135,7 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed=True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) model = EnerModel(descrpt, fitting) diff --git a/source/tests/test_model_se_a_aparam.py b/source/tests/test_model_se_a_aparam.py index 7a07a30d20..e4e9e87572 100644 --- a/source/tests/test_model_se_a_aparam.py +++ b/source/tests/test_model_se_a_aparam.py @@ -37,7 +37,7 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) model = EnerModel(descrpt, fitting) diff --git a/source/tests/test_model_se_a_fparam.py b/source/tests/test_model_se_a_fparam.py index a045c5e520..8ed1c03ba9 100644 --- a/source/tests/test_model_se_a_fparam.py +++ b/source/tests/test_model_se_a_fparam.py @@ -36,7 +36,7 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) # descrpt = DescrptSeA(jdata['model']['descriptor']) diff --git a/source/tests/test_model_se_a_srtab.py 
b/source/tests/test_model_se_a_srtab.py index 991bd9b7e5..b39ad41ec0 100644 --- a/source/tests/test_model_se_a_srtab.py +++ b/source/tests/test_model_se_a_srtab.py @@ -50,7 +50,7 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) # descrpt = DescrptSeA(jdata['model']['descriptor']) diff --git a/source/tests/test_model_se_a_type.py b/source/tests/test_model_se_a_type.py index ebb9a445cb..841a4b6a66 100644 --- a/source/tests/test_model_se_a_type.py +++ b/source/tests/test_model_se_a_type.py @@ -38,14 +38,15 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) typeebd_param = jdata['model']['type_embedding'] typeebd = TypeEmbedNet( neuron = typeebd_param['neuron'], resnet_dt = typeebd_param['resnet_dt'], - seed = typeebd_param['seed']) + seed = typeebd_param['seed'], + uniform_seed = True) model = EnerModel(descrpt, fitting, typeebd) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) diff --git a/source/tests/test_model_se_r.py b/source/tests/test_model_se_r.py index f178dbda56..6569c5dd8e 100644 --- a/source/tests/test_model_se_r.py +++ b/source/tests/test_model_se_r.py @@ -36,7 +36,7 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeR(**jdata['model']['descriptor']) + descrpt = DescrptSeR(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = 
descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) diff --git a/source/tests/test_model_se_t.py b/source/tests/test_model_se_t.py index 60ae54736b..7eae2619b5 100644 --- a/source/tests/test_model_se_t.py +++ b/source/tests/test_model_se_t.py @@ -36,7 +36,7 @@ def test_model(self): numb_test = 1 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeT(**jdata['model']['descriptor']) + descrpt = DescrptSeT(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = EnerFitting(**jdata['model']['fitting_net']) model = EnerModel(descrpt, fitting) diff --git a/source/tests/test_polar_se_a.py b/source/tests/test_polar_se_a.py index 8ca358b585..a22198c272 100644 --- a/source/tests/test_polar_se_a.py +++ b/source/tests/test_polar_se_a.py @@ -37,7 +37,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) jdata['model']['fitting_net'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt fitting = PolarFittingSeA(**jdata['model']['fitting_net']) model = PolarModel(descrpt, fitting) diff --git a/source/tests/test_type_embed.py b/source/tests/test_type_embed.py index 4773bf2c45..5f71d8ed93 100644 --- a/source/tests/test_type_embed.py +++ b/source/tests/test_type_embed.py @@ -27,7 +27,7 @@ def test_embed_atom_type(self): atom_embed[ii][jj], expected_out[ii][jj], places=10) def test_type_embed_net(self): - ten = TypeEmbedNet([2, 4, 8]) + ten = TypeEmbedNet([2, 4, 8], seed = 1, uniform_seed = True) type_embedding = ten.build(2) sess = tf.Session() sess.run(tf.global_variables_initializer()) From c872702067a2e95ba7c4246b60d44c27391fa092 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 08:09:50 +0800 Subject: [PATCH 3/9] use different seeds for embedding net of 
different atom types --- deepmd/descriptor/se_a.py | 14 ++++++-------- deepmd/descriptor/se_r.py | 11 ++++++----- deepmd/descriptor/se_t.py | 9 +++++---- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index c6b78012cb..849c09d065 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -9,7 +9,7 @@ from deepmd.env import GLOBAL_NP_FLOAT_PRECISION from deepmd.env import op_module from deepmd.env import default_tf_session_config -from deepmd.utils.network import embedding_net +from deepmd.utils.network import embedding_net, embedding_net_rand_seed_shift from deepmd.utils.tabulate import DeepTabulate from deepmd.utils.type_embed import embed_atom_type @@ -74,6 +74,7 @@ def __init__ (self, self.filter_resnet_dt = resnet_dt self.seed = seed self.uniform_seed = uniform_seed + self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron) self.trainable = trainable self.filter_activation_fn = get_activation_func(activation_function) self.filter_precision = get_precision(precision) @@ -463,7 +464,7 @@ def _pass_filter(self, [ 0, start_index* self.ndescrpt], [-1, natoms[2+type_i]* self.ndescrpt] ) inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) - layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_'+str(type_i)+suffix, natoms=natoms, reuse=reuse, seed = self.seed, trainable = trainable, activation_fn = self.filter_activation_fn) + layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_'+str(type_i)+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn) layer = tf.reshape(layer, [tf.shape(inputs)[0], natoms[2+type_i] * self.get_dim_out()]) qmat = tf.reshape(qmat, [tf.shape(inputs)[0], natoms[2+type_i] * self.get_dim_rot_mat_1() * 3]) output.append(layer) @@ -473,7 +474,7 @@ def _pass_filter(self, inputs_i = inputs inputs_i = tf.reshape(inputs_i, [-1, 
self.ndescrpt]) type_i = -1 - layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_all'+suffix, natoms=natoms, reuse=reuse, seed = self.seed, trainable = trainable, activation_fn = self.filter_activation_fn, type_embedding=type_embedding) + layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_all'+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn, type_embedding=type_embedding) layer = tf.reshape(layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()]) qmat = tf.reshape(qmat, [tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3]) output.append(layer) @@ -571,7 +572,6 @@ def _filter_lower( activation_fn = None, bavg = 0.0, stddev = 1.0, - seed = None, trainable = True, suffix = '', ): @@ -614,9 +614,10 @@ def _filter_lower( name_suffix = suffix, stddev = stddev, bavg = bavg, - seed = seed, + seed = self.seed, trainable = trainable, uniform_seed = self.uniform_seed) + if not self.uniform_seed: self.seed += self.seed_shift else: w = tf.zeros((outputs_size[0], outputs_size[-1]), dtype=GLOBAL_TF_FLOAT_PRECISION) xyz_scatter = tf.matmul(xyz_scatter, w) @@ -636,7 +637,6 @@ def _filter( bavg=0.0, name='linear', reuse=None, - seed=None, trainable = True): nframes = tf.shape(tf.reshape(inputs, [-1, natoms[0], self.ndescrpt]))[0] # natom x (nei x 4) @@ -659,7 +659,6 @@ def _filter( activation_fn = activation_fn, stddev = stddev, bavg = bavg, - seed = seed, trainable = trainable, suffix = "_"+str(type_i)) if type_i == 0: @@ -679,7 +678,6 @@ def _filter( activation_fn = activation_fn, stddev = stddev, bavg = bavg, - seed = seed, trainable = trainable) # natom x nei x outputs_size # xyz_scatter = tf.concat(xyz_scatter_total, axis=1) diff --git a/deepmd/descriptor/se_r.py b/deepmd/descriptor/se_r.py index bc003731a9..5c24f5ac3c 100644 --- a/deepmd/descriptor/se_r.py +++ b/deepmd/descriptor/se_r.py @@ -8,7 +8,7 @@ from deepmd.env 
import GLOBAL_NP_FLOAT_PRECISION from deepmd.env import op_module from deepmd.env import default_tf_session_config -from deepmd.utils.network import embedding_net +from deepmd.utils.network import embedding_net, embedding_net_rand_seed_shift class DescrptSeR (): @docstring_parameter(list_to_doc(ACTIVATION_FN_DICT.keys()), list_to_doc(PRECISION_DICT.keys())) @@ -79,6 +79,7 @@ def __init__ (self, self.filter_resnet_dt = resnet_dt self.seed = seed self.uniform_seed = uniform_seed + self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron) self.trainable = trainable self.filter_activation_fn = get_activation_func(activation_function) self.filter_precision = get_precision(precision) @@ -379,7 +380,7 @@ def _pass_filter(self, [ 0, start_index* self.ndescrpt], [-1, natoms[2+type_i]* self.ndescrpt] ) inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) - layer = self._filter_r(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_'+str(type_i)+suffix, natoms=natoms, reuse=reuse, seed = self.seed, trainable = trainable, activation_fn = self.filter_activation_fn) + layer = self._filter_r(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_'+str(type_i)+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn) layer = tf.reshape(layer, [tf.shape(inputs)[0], natoms[2+type_i] * self.get_dim_out()]) output.append(layer) start_index += natoms[2+type_i] @@ -387,7 +388,7 @@ def _pass_filter(self, inputs_i = inputs inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) type_i = -1 - layer = self._filter_r(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_all'+suffix, natoms=natoms, reuse=reuse, seed = self.seed, trainable = trainable, activation_fn = self.filter_activation_fn) + layer = self._filter_r(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_all'+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn) layer = 
tf.reshape(layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()]) output.append(layer) output = tf.concat(output, axis = 1) @@ -447,7 +448,6 @@ def _filter_r(self, bavg=0.0, name='linear', reuse=None, - seed=None, trainable = True): # natom x nei outputs_size = [1] + self.filter_neuron @@ -473,9 +473,10 @@ def _filter_r(self, name_suffix = "_"+str(type_i), stddev = stddev, bavg = bavg, - seed = seed, + seed = self.seed, trainable = trainable, uniform_seed = self.uniform_seed) + if not self.uniform_seed: self.seed += self.seed_shift else: w = tf.zeros((outputs_size[0], outputs_size[-1]), dtype=GLOBAL_TF_FLOAT_PRECISION) xyz_scatter = tf.matmul(xyz_scatter, w) diff --git a/deepmd/descriptor/se_t.py b/deepmd/descriptor/se_t.py index 828a1670ba..e03dec69cf 100644 --- a/deepmd/descriptor/se_t.py +++ b/deepmd/descriptor/se_t.py @@ -8,7 +8,7 @@ from deepmd.env import GLOBAL_NP_FLOAT_PRECISION from deepmd.env import op_module from deepmd.env import default_tf_session_config -from deepmd.utils.network import embedding_net +from deepmd.utils.network import embedding_net, embedding_net_rand_seed_shift class DescrptSeT (): @docstring_parameter(list_to_doc(ACTIVATION_FN_DICT.keys()), list_to_doc(PRECISION_DICT.keys())) @@ -61,6 +61,7 @@ def __init__ (self, self.filter_resnet_dt = resnet_dt self.seed = seed self.uniform_seed = uniform_seed + self.seed_shift = embedding_net_rand_seed_shift(self.filter_neuron) self.trainable = trainable self.filter_activation_fn = get_activation_func(activation_function) self.filter_precision = get_precision(precision) @@ -376,7 +377,7 @@ def _pass_filter(self, inputs_i = inputs inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt]) type_i = -1 - layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, name='filter_type_all'+suffix, natoms=natoms, reuse=reuse, seed = self.seed, trainable = trainable, activation_fn = self.filter_activation_fn) + layer, qmat = self._filter(tf.cast(inputs_i, self.filter_precision), type_i, 
name='filter_type_all'+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn) layer = tf.reshape(layer, [tf.shape(inputs)[0], natoms[0] * self.get_dim_out()]) # qmat = tf.reshape(qmat, [tf.shape(inputs)[0], natoms[0] * self.get_dim_rot_mat_1() * 3]) output.append(layer) @@ -447,7 +448,6 @@ def _filter(self, bavg=0.0, name='linear', reuse=None, - seed=None, trainable = True): # natom x (nei x 4) shape = inputs.get_shape().as_list() @@ -496,9 +496,10 @@ def _filter(self, name_suffix = f"_{type_i}_{type_j}", stddev = stddev, bavg = bavg, - seed = seed, + seed = self.seed, trainable = trainable, uniform_seed = self.uniform_seed) + if not self.uniform_seed: self.seed += self.seed_shift # with natom x nei_type_i x nei_type_j x out_size ebd_env_ij = tf.reshape(ebd_env_ij, [-1, nei_type_i, nei_type_j, outputs_size[-1]]) # with natom x out_size From fcde5d9e5e2735c57a850f4b61511fc7dc1dd59f Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 10:35:47 +0800 Subject: [PATCH 4/9] different seed for different layers in the fitting nets.
add UT for dipole model --- deepmd/fit/dipole.py | 19 ++++-- deepmd/fit/ener.py | 20 +++++-- deepmd/fit/polar.py | 22 ++++--- deepmd/utils/network.py | 23 +++++-- source/tests/test_dipole_se_a.py | 100 +++++++++++++++++++++++++++++++ source/tests/test_model_se_a.py | 4 +- source/tests/test_polar_se_a.py | 2 +- 7 files changed, 165 insertions(+), 25 deletions(-) create mode 100644 source/tests/test_dipole_se_a.py diff --git a/deepmd/fit/dipole.py b/deepmd/fit/dipole.py index 630d1bbb3a..df6e70bde2 100644 --- a/deepmd/fit/dipole.py +++ b/deepmd/fit/dipole.py @@ -5,7 +5,7 @@ from deepmd.env import tf from deepmd.common import add_data_requirement, get_activation_func, get_precision, ACTIVATION_FN_DICT, PRECISION_DICT, docstring_parameter from deepmd.utils.argcheck import list_to_doc -from deepmd.utils.network import one_layer +from deepmd.utils.network import one_layer, one_layer_rand_seed_shift from deepmd.descriptor import DescrptSeA from deepmd.env import global_cvt_2_tf_float @@ -23,8 +23,9 @@ def __init__ (self, sel_type : List[int] = None, seed : int = None, activation_function : str = 'tanh', - precision : str = 'default' - ) : + precision : str = 'default', + uniform_seed: bool = False + ) -> None: """ Constructor @@ -45,6 +46,8 @@ def __init__ (self, The activation function in the embedding net. Supported options are {0} precision : str The precision of the embedding net parameters. 
Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ if not isinstance(descrpt, DescrptSeA) : raise RuntimeError('DipoleFittingSeA only supports DescrptSeA') @@ -65,6 +68,8 @@ def __init__ (self, self.sel_type = [ii for ii in range(self.ntypes)] self.sel_type = sel_type self.seed = seed + self.uniform_seed = uniform_seed + self.seed_shift = one_layer_rand_seed_shift() self.fitting_activation_fn = get_activation_func(activation_function) self.fitting_precision = get_precision(precision) self.dim_rot_mat_1 = descrpt.get_dim_rot_mat_1() @@ -134,11 +139,13 @@ def build (self, layer = inputs_i for ii in range(0,len(self.n_neuron)) : if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : - layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) + layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) else : - layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) + layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift # (nframes x natoms) x naxis - final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, 
precision = self.fitting_precision) + final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift # (nframes x natoms) x 1 * naxis final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], 1, self.dim_rot_mat_1]) # (nframes x natoms) x 1 x 3(coord) diff --git a/deepmd/fit/ener.py b/deepmd/fit/ener.py index 09318d3e3f..7fb85f0c7b 100644 --- a/deepmd/fit/ener.py +++ b/deepmd/fit/ener.py @@ -5,7 +5,7 @@ from deepmd.env import tf from deepmd.common import ClassArg, add_data_requirement, get_activation_func, get_precision, ACTIVATION_FN_DICT, PRECISION_DICT, docstring_parameter from deepmd.utils.argcheck import list_to_doc -from deepmd.utils.network import one_layer +from deepmd.utils.network import one_layer, one_layer_rand_seed_shift from deepmd.descriptor import DescrptLocFrame from deepmd.descriptor import DescrptSeA from deepmd.utils.type_embed import embed_atom_type @@ -27,7 +27,8 @@ def __init__ (self, seed : int = None, atom_ener : List[float] = [], activation_function : str = 'tanh', - precision : str = 'default' + precision : str = 'default', + uniform_seed: bool = False ) -> None: """ Constructor @@ -61,6 +62,8 @@ def __init__ (self, The activation function in the embedding net. Supported options are {0} precision The precision of the embedding net parameters. 
Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ # model param self.ntypes = descrpt.get_ntypes() @@ -83,6 +86,8 @@ def __init__ (self, self.resnet_dt = resnet_dt self.rcond = rcond self.seed = seed + self.uniform_seed = uniform_seed + self.seed_shift = one_layer_rand_seed_shift() self.tot_ener_zero = tot_ener_zero self.fitting_activation_fn = get_activation_func(activation_function) self.fitting_precision = get_precision(precision) @@ -251,7 +256,8 @@ def _build_lower( use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, - trainable = self.trainable[ii]) + trainable = self.trainable[ii], + uniform_seed = self.uniform_seed) else : layer = one_layer( layer, @@ -261,7 +267,9 @@ def _build_lower( seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, - trainable = self.trainable[ii]) + trainable = self.trainable[ii], + uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift final_layer = one_layer( layer, 1, @@ -271,7 +279,9 @@ def _build_lower( reuse=reuse, seed = self.seed, precision = self.fitting_precision, - trainable = self.trainable[-1]) + trainable = self.trainable[-1], + uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift return final_layer diff --git a/deepmd/fit/polar.py b/deepmd/fit/polar.py index ab27244b4b..a6dcdb3bf6 100644 --- a/deepmd/fit/polar.py +++ b/deepmd/fit/polar.py @@ -5,7 +5,7 @@ from deepmd.env import tf from deepmd.common import add_data_requirement, get_activation_func, get_precision, ACTIVATION_FN_DICT, PRECISION_DICT, docstring_parameter from deepmd.utils.argcheck import list_to_doc -from deepmd.utils.network import one_layer +from deepmd.utils.network import one_layer, one_layer_rand_seed_shift from deepmd.descriptor import DescrptLocFrame from deepmd.descriptor 
import DescrptSeA @@ -114,7 +114,8 @@ def __init__ (self, #diag_shift : List[float] = None, YWolfeee: will not support the user to assign a shift seed : int = None, activation_function : str = 'tanh', - precision : str = 'default' + precision : str = 'default', + uniform_seed: bool = False ) -> None: """ Constructor @@ -142,6 +143,8 @@ def __init__ (self, The activation function in the embedding net. Supported options are {0} precision : str The precision of the embedding net parameters. Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ if not isinstance(descrpt, DescrptSeA) : raise RuntimeError('PolarFittingSeA only supports DescrptSeA') @@ -163,6 +166,8 @@ def __init__ (self, self.sel_type = sel_type self.fit_diag = fit_diag self.seed = seed + self.uniform_seed = uniform_seed + self.seed_shift = one_layer_rand_seed_shift() #self.diag_shift = diag_shift self.shift_diag = shift_diag self.scale = scale @@ -313,16 +318,18 @@ def build (self, layer = inputs_i for ii in range(0,len(self.n_neuron)) : if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : - layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) + layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) else : - layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) + layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, 
reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift if self.fit_diag : bavg = np.zeros(self.dim_rot_mat_1) # bavg[0] = self.avgeig[0] # bavg[1] = self.avgeig[1] # bavg[2] = self.avgeig[2] # (nframes x natoms) x naxis - final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision) + final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift # (nframes x natoms) x naxis final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1]) # (nframes x natoms) x naxis x naxis @@ -333,7 +340,8 @@ def build (self, # bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1] # bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2] # (nframes x natoms) x (naxis x naxis) - final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision) + final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift # (nframes x natoms) x naxis x naxis final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1, self.dim_rot_mat_1]) # (nframes x natoms) x naxis x naxis @@ -375,7 +383,7 @@ def __init__ (self, diag_shift : List[float] = None, seed 
: int = None, activation_function : str = 'tanh', - precision : str = 'default' + precision : str = 'default' ) -> None: """ Constructor diff --git a/deepmd/utils/network.py b/deepmd/utils/network.py index de1162456a..b7d2fb24c2 100644 --- a/deepmd/utils/network.py +++ b/deepmd/utils/network.py @@ -3,6 +3,9 @@ from deepmd.env import tf from deepmd.env import GLOBAL_TF_FLOAT_PRECISION +def one_layer_rand_seed_shift(): + return 3 + def one_layer(inputs, outputs_size, activation_fn=tf.nn.tanh, @@ -14,19 +17,27 @@ def one_layer(inputs, seed=None, use_timestep = False, trainable = True, - useBN = False): + useBN = False, + uniform_seed = False): with tf.variable_scope(name, reuse=reuse): shape = inputs.get_shape().as_list() w = tf.get_variable('matrix', [shape[1], outputs_size], precision, - tf.random_normal_initializer(stddev=stddev/np.sqrt(shape[1]+outputs_size), seed = seed), + tf.random_normal_initializer( + stddev=stddev/np.sqrt(shape[1]+outputs_size), + seed = seed if (seed is None or uniform_seed) else seed + 0 + ), trainable = trainable) variable_summaries(w, 'matrix') b = tf.get_variable('bias', [outputs_size], precision, - tf.random_normal_initializer(stddev=stddev, mean = bavg, seed = seed), + tf.random_normal_initializer( + stddev=stddev, + mean = bavg, + seed = seed if (seed is None or uniform_seed) else seed + 1 + ), trainable = trainable) variable_summaries(b, 'bias') hidden = tf.matmul(inputs, w) + b @@ -34,7 +45,11 @@ def one_layer(inputs, idt = tf.get_variable('idt', [outputs_size], precision, - tf.random_normal_initializer(stddev=0.001, mean = 0.1, seed = seed), + tf.random_normal_initializer( + stddev=0.001, + mean = 0.1, + seed = seed if (seed is None or uniform_seed) else seed + 2 + ), trainable = trainable) variable_summaries(idt, 'idt') if activation_fn != None: diff --git a/source/tests/test_dipole_se_a.py b/source/tests/test_dipole_se_a.py new file mode 100644 index 0000000000..156d7bcdc6 --- /dev/null +++ b/source/tests/test_dipole_se_a.py @@ 
-0,0 +1,100 @@ +import dpdata,os,sys,unittest +import numpy as np +from deepmd.env import tf +from common import Data,gen_data, j_loader + +from deepmd.utils.data_system import DataSystem +from deepmd.descriptor import DescrptSeA +from deepmd.fit import DipoleFittingSeA +from deepmd.model import DipoleModel +from deepmd.common import j_must_have + +GLOBAL_ENER_FLOAT_PRECISION = tf.float64 +GLOBAL_TF_FLOAT_PRECISION = tf.float64 +GLOBAL_NP_FLOAT_PRECISION = np.float64 + +class TestModel(unittest.TestCase): + def setUp(self) : + gen_data() + + def test_model(self): + jfile = 'polar_se_a.json' + jdata = j_loader(jfile) + + systems = j_must_have(jdata, 'systems') + set_pfx = j_must_have(jdata, 'set_prefix') + batch_size = j_must_have(jdata, 'batch_size') + test_size = j_must_have(jdata, 'numb_test') + batch_size = 1 + test_size = 1 + stop_batch = j_must_have(jdata, 'stop_batch') + rcut = j_must_have (jdata['model']['descriptor'], 'rcut') + + data = DataSystem(systems, set_pfx, batch_size, test_size, rcut, run_opt = None) + + test_data = data.get_test () + numb_test = 1 + + jdata['model']['descriptor'].pop('type', None) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) + jdata['model']['fitting_net'].pop('type', None) + jdata['model']['fitting_net'].pop('fit_diag', None) + jdata['model']['fitting_net']['descrpt'] = descrpt + fitting = DipoleFittingSeA(**jdata['model']['fitting_net'], uniform_seed = True) + model = DipoleModel(descrpt, fitting) + + # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) + input_data = {'coord' : [test_data['coord']], + 'box': [test_data['box']], + 'type': [test_data['type']], + 'natoms_vec' : [test_data['natoms_vec']], + 'default_mesh' : [test_data['default_mesh']], + 'fparam': [test_data['fparam']], + } + model._compute_input_stat(input_data) + + t_prop_c = tf.placeholder(tf.float32, [5], name='t_prop_c') + t_energy = 
tf.placeholder(GLOBAL_ENER_FLOAT_PRECISION, [None], name='t_energy') + t_force = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_force') + t_virial = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_virial') + t_atom_ener = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='t_atom_ener') + t_coord = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None], name='i_coord') + t_type = tf.placeholder(tf.int32, [None], name='i_type') + t_natoms = tf.placeholder(tf.int32, [model.ntypes+2], name='i_natoms') + t_box = tf.placeholder(GLOBAL_TF_FLOAT_PRECISION, [None, 9], name='i_box') + t_mesh = tf.placeholder(tf.int32, [None], name='i_mesh') + is_training = tf.placeholder(tf.bool) + t_fparam = None + + model_pred \ + = model.build (t_coord, + t_type, + t_natoms, + t_box, + t_mesh, + t_fparam, + suffix = "polar_se_a", + reuse = False) + dipole = model_pred['dipole'] + + feed_dict_test = {t_prop_c: test_data['prop_c'], + t_coord: np.reshape(test_data['coord'] [:numb_test, :], [-1]), + t_box: test_data['box'] [:numb_test, :], + t_type: np.reshape(test_data['type'] [:numb_test, :], [-1]), + t_natoms: test_data['natoms_vec'], + t_mesh: test_data['default_mesh'], + is_training: False} + + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + [p] = sess.run([dipole], feed_dict = feed_dict_test) + + p = p.reshape([-1]) + refp = [1.616802262298876514e+01,9.809535439521079425e+00,3.572312180768947854e-01,1.336308874095981203e+00,1.057908563208963848e+01,-5.999602350098874881e-01] + + places = 10 + for ii in range(p.size) : + self.assertAlmostEqual(p[ii], refp[ii], places = places) + + + diff --git a/source/tests/test_model_se_a.py b/source/tests/test_model_se_a.py index d8d4f29f4b..700986e308 100644 --- a/source/tests/test_model_se_a.py +++ b/source/tests/test_model_se_a.py @@ -56,7 +56,7 @@ def test_model_atom_ener(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed=True) 
jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed=True) model = EnerModel(descrpt, fitting) test_data['natoms_vec'] = [1, 1, 1, 0] @@ -137,7 +137,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed=True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed=True) model = EnerModel(descrpt, fitting) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) diff --git a/source/tests/test_polar_se_a.py b/source/tests/test_polar_se_a.py index a22198c272..4049aee717 100644 --- a/source/tests/test_polar_se_a.py +++ b/source/tests/test_polar_se_a.py @@ -39,7 +39,7 @@ def test_model(self): jdata['model']['fitting_net'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = PolarFittingSeA(**jdata['model']['fitting_net']) + fitting = PolarFittingSeA(**jdata['model']['fitting_net'], uniform_seed = True) model = PolarModel(descrpt, fitting) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) From aabbfd9d7fbaa316f2918b7013635b697be86c10 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 15:07:58 +0800 Subject: [PATCH 5/9] support EF, fix bugs --- deepmd/descriptor/se_a_ef.py | 40 +++++++++++++++---------- source/tests/test_descrpt_sea_ef_rot.py | 4 +-- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/deepmd/descriptor/se_a_ef.py b/deepmd/descriptor/se_a_ef.py index 0a288848f4..bdba470d39 100644 --- a/deepmd/descriptor/se_a_ef.py +++ 
b/deepmd/descriptor/se_a_ef.py @@ -25,7 +25,8 @@ def __init__(self, exclude_types: List[int] = [], set_davg_zero: bool = False, activation_function: str = 'tanh', - precision: str = 'default' + precision: str = 'default', + uniform_seed = False ) -> None: """ Constructor @@ -59,6 +60,8 @@ def __init__(self, The activation function in the embedding net. Supported options are {0} precision The precision of the embedding net parameters. Supported options are {1} + uniform_seed + Only for the purpose of backward compatibility, retrieves the old behavior of using the random seed """ self.descrpt_para = DescrptSeAEfLower( op_module.descrpt_se_a_ef_para, @@ -75,6 +78,7 @@ def __init__(self, set_davg_zero, activation_function, precision, + uniform_seed, ) self.descrpt_vert = DescrptSeAEfLower( op_module.descrpt_se_a_ef_vert, @@ -91,6 +95,7 @@ def __init__(self, set_davg_zero, activation_function, precision, + uniform_seed, ) def get_rcut (self) -> float: @@ -279,22 +284,25 @@ def __init__ (self, exclude_types: List[int] = [], set_davg_zero: bool = False, activation_function: str = 'tanh', - precision: str = 'default' + precision: str = 'default', + uniform_seed : bool = False, ) -> None: - DescrptSeA.__init__(self, - rcut, - rcut_smth, - sel, - neuron, - axis_neuron, - resnet_dt, - trainable, - seed, - type_one_side, - exclude_types, - set_davg_zero, - activation_function, - precision + DescrptSeA.__init__( + self, + rcut, + rcut_smth, + sel, + neuron, + axis_neuron, + resnet_dt, + trainable, + seed, + type_one_side, + exclude_types, + set_davg_zero, + activation_function, + precision, + uniform_seed ) # DescrptSeA.__init__(self, **jdata) # args = ClassArg()\ diff --git a/source/tests/test_descrpt_sea_ef_rot.py b/source/tests/test_descrpt_sea_ef_rot.py index 31d2fafa3f..34c7434a66 100644 --- a/source/tests/test_descrpt_sea_ef_rot.py +++ b/source/tests/test_descrpt_sea_ef_rot.py @@ -60,9 +60,9 @@ def build_efv(self, efield = self._normalize_3d(efield) efield = 
tf.reshape(efield, [-1, tnatoms[0] * 3]) if op != op_module.prod_env_mat_a : - descrpt = DescrptSeAEfLower(op, **{'sel':self.sel_a, 'rcut': 6, 'rcut_smth' : 5.5}) + descrpt = DescrptSeAEfLower(op, **{'sel':self.sel_a, 'rcut': 6, 'rcut_smth' : 5.5, 'seed': 1, 'uniform_seed': True}) else: - descrpt = DescrptSeA(**{'sel':self.sel_a, 'rcut': 6, 'rcut_smth' : 0.5}) + descrpt = DescrptSeA(**{'sel':self.sel_a, 'rcut': 6, 'rcut_smth' : 0.5, 'seed': 1, 'uniform_seed': True}) dout = descrpt.build(dcoord, dtype, tnatoms, From a631c11515e4cdb8b9c90c6cee1292fd046b7e47 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 15:40:34 +0800 Subject: [PATCH 6/9] fix bugs in wfc --- deepmd/fit/wfc.py | 15 ++++++++++----- source/tests/test_wfc.py | 1 + 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/deepmd/fit/wfc.py b/deepmd/fit/wfc.py index fc28744fb1..1c3af65594 100644 --- a/deepmd/fit/wfc.py +++ b/deepmd/fit/wfc.py @@ -5,7 +5,7 @@ from deepmd.env import tf from deepmd.common import ClassArg, add_data_requirement, get_activation_func, get_precision, ACTIVATION_FN_DICT, PRECISION_DICT, docstring_parameter from deepmd.utils.argcheck import list_to_doc -from deepmd.utils.network import one_layer +from deepmd.utils.network import one_layer, one_layer_rand_seed_shift from deepmd.descriptor import DescrptLocFrame from deepmd.env import global_cvt_2_tf_float @@ -27,13 +27,16 @@ def __init__ (self, jdata, descrpt): .add('sel_type', [list,int], default = [ii for ii in range(self.ntypes)], alias = 'wfc_type')\ .add('seed', int)\ .add("activation_function", str, default = "tanh")\ - .add('precision', str, default = "default") + .add('precision', str, default = "default")\ + .add('uniform_seed', bool, default = False) class_data = args.parse(jdata) self.n_neuron = class_data['neuron'] self.resnet_dt = class_data['resnet_dt'] self.wfc_numb = class_data['wfc_numb'] self.sel_type = class_data['sel_type'] self.seed = class_data['seed'] + self.uniform_seed = 
class_data['uniform_seed'] + self.seed_shift = one_layer_rand_seed_shift() self.fitting_activation_fn = get_activation_func(class_data["activation_function"]) self.fitting_precision = get_precision(class_data['precision']) self.useBN = False @@ -75,11 +78,13 @@ def build (self, layer = inputs_i for ii in range(0,len(self.n_neuron)) : if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] : - layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) + layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) else : - layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision) + layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift # (nframes x natoms) x (nwfc x 3) - final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision) + final_layer = one_layer(layer, self.wfc_numb * 3, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, precision = self.fitting_precision, uniform_seed = self.uniform_seed) + if not self.uniform_seed : self.seed += self.seed_shift # (nframes x natoms) x nwfc(wc) x 3(coord_local) final_layer = 
tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.wfc_numb, 3]) # (nframes x natoms) x nwfc(wc) x 3(coord) diff --git a/source/tests/test_wfc.py b/source/tests/test_wfc.py index 9038c6d177..c3759429f6 100644 --- a/source/tests/test_wfc.py +++ b/source/tests/test_wfc.py @@ -38,6 +38,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) jdata['model']['descriptor'].pop('_comment', None) descrpt = DescrptLocFrame(**jdata['model']['descriptor']) + jdata['model']['fitting_net']['uniform_seed'] = True fitting = WFCFitting(jdata['model']['fitting_net'], descrpt) model = WFCModel(descrpt, fitting) From a8762c786b246910c0b40e2da3a8f58ada371e1c Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 16:28:45 +0800 Subject: [PATCH 7/9] fix bugs in tests --- source/tests/test_fitting_ener_type.py | 4 ++-- source/tests/test_model_loc_frame.py | 3 ++- source/tests/test_model_se_a_aparam.py | 2 +- source/tests/test_model_se_a_fparam.py | 2 +- source/tests/test_model_se_a_srtab.py | 2 +- source/tests/test_model_se_a_type.py | 2 +- source/tests/test_model_se_r.py | 2 +- source/tests/test_model_se_t.py | 2 +- 8 files changed, 10 insertions(+), 9 deletions(-) diff --git a/source/tests/test_fitting_ener_type.py b/source/tests/test_fitting_ener_type.py index f7209be37c..71312188a3 100644 --- a/source/tests/test_fitting_ener_type.py +++ b/source/tests/test_fitting_ener_type.py @@ -43,9 +43,9 @@ def test_fitting(self): jdata['model']['descriptor']['axis_neuron'] = 2 jdata['model']['descriptor'].pop('type', None) - descrpt = DescrptSeA(**jdata['model']['descriptor']) + descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], 
[test_data['default_mesh']]) input_data = {'coord' : [test_data['coord']], diff --git a/source/tests/test_model_loc_frame.py b/source/tests/test_model_loc_frame.py index c94b71be01..fe4d128c08 100644 --- a/source/tests/test_model_loc_frame.py +++ b/source/tests/test_model_loc_frame.py @@ -40,7 +40,8 @@ def test_model(self): descrpt = DescrptLocFrame(**jdata['model']['descriptor']) fitting = EnerFitting(descrpt, neuron = [240, 120, 60, 30, 10], - seed = 1) + seed = 1, + uniform_seed = True) model = EnerModel( descrpt, fitting, diff --git a/source/tests/test_model_se_a_aparam.py b/source/tests/test_model_se_a_aparam.py index e4e9e87572..77acb2143f 100644 --- a/source/tests/test_model_se_a_aparam.py +++ b/source/tests/test_model_se_a_aparam.py @@ -39,7 +39,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) model = EnerModel(descrpt, fitting) # model._compute_dstats([test_data['coord']], [test_data['box']], [test_data['type']], [test_data['natoms_vec']], [test_data['default_mesh']]) diff --git a/source/tests/test_model_se_a_fparam.py b/source/tests/test_model_se_a_fparam.py index 8ed1c03ba9..b69ee713e6 100644 --- a/source/tests/test_model_se_a_fparam.py +++ b/source/tests/test_model_se_a_fparam.py @@ -38,7 +38,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) # descrpt = DescrptSeA(jdata['model']['descriptor']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = EnerModel(descrpt, fitting) 
diff --git a/source/tests/test_model_se_a_srtab.py b/source/tests/test_model_se_a_srtab.py index b39ad41ec0..d04f011acd 100644 --- a/source/tests/test_model_se_a_srtab.py +++ b/source/tests/test_model_se_a_srtab.py @@ -52,7 +52,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) # descrpt = DescrptSeA(jdata['model']['descriptor']) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = EnerModel( diff --git a/source/tests/test_model_se_a_type.py b/source/tests/test_model_se_a_type.py index 841a4b6a66..393cb4c86e 100644 --- a/source/tests/test_model_se_a_type.py +++ b/source/tests/test_model_se_a_type.py @@ -40,7 +40,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeA(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) typeebd_param = jdata['model']['type_embedding'] typeebd = TypeEmbedNet( neuron = typeebd_param['neuron'], diff --git a/source/tests/test_model_se_r.py b/source/tests/test_model_se_r.py index 6569c5dd8e..5172746cc5 100644 --- a/source/tests/test_model_se_r.py +++ b/source/tests/test_model_se_r.py @@ -38,7 +38,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeR(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) # fitting = EnerFitting(jdata['model']['fitting_net'], descrpt) model = EnerModel(descrpt, 
fitting) diff --git a/source/tests/test_model_se_t.py b/source/tests/test_model_se_t.py index 7eae2619b5..78b06ea7ab 100644 --- a/source/tests/test_model_se_t.py +++ b/source/tests/test_model_se_t.py @@ -38,7 +38,7 @@ def test_model(self): jdata['model']['descriptor'].pop('type', None) descrpt = DescrptSeT(**jdata['model']['descriptor'], uniform_seed = True) jdata['model']['fitting_net']['descrpt'] = descrpt - fitting = EnerFitting(**jdata['model']['fitting_net']) + fitting = EnerFitting(**jdata['model']['fitting_net'], uniform_seed = True) model = EnerModel(descrpt, fitting) input_data = {'coord' : [test_data['coord']], From 95f1a4148c5382a4f2759f81f77d19c4df546c70 Mon Sep 17 00:00:00 2001 From: Han Wang Date: Wed, 2 Jun 2021 17:44:54 +0800 Subject: [PATCH 8/9] fix bug in test: duplicated model name --- source/tests/test_dipole_se_a.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/tests/test_dipole_se_a.py b/source/tests/test_dipole_se_a.py index 156d7bcdc6..eb4396dec1 100644 --- a/source/tests/test_dipole_se_a.py +++ b/source/tests/test_dipole_se_a.py @@ -73,7 +73,7 @@ def test_model(self): t_box, t_mesh, t_fparam, - suffix = "polar_se_a", + suffix = "dipole_se_a", reuse = False) dipole = model_pred['dipole'] From 0a809e52ca7445b1461d863d443016b38ec229bd Mon Sep 17 00:00:00 2001 From: Han Wang Date: Thu, 3 Jun 2021 07:07:55 +0800 Subject: [PATCH 9/9] Fix typo in the type hint of `exclude_types` --- deepmd/descriptor/se_a.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepmd/descriptor/se_a.py b/deepmd/descriptor/se_a.py index 849c09d065..eb18c68fb2 100644 --- a/deepmd/descriptor/se_a.py +++ b/deepmd/descriptor/se_a.py @@ -25,7 +25,7 @@ def __init__ (self, trainable: bool = True, seed: int = None, type_one_side: bool = True, - exclude_types: List[int] = [], + exclude_types: List[List[int]] = [], set_davg_zero: bool = False, activation_function: str = 'tanh', precision: str = 'default',