Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 8 additions & 4 deletions deepmd/descriptor/se_a.py
Original file line number Diff line number Diff line change
Expand Up @@ -142,8 +142,6 @@ def __init__ (self,
self.exclude_types.add((tt[1], tt[0]))
self.set_davg_zero = set_davg_zero
self.type_one_side = type_one_side
if self.type_one_side and len(exclude_types) != 0:
raise RuntimeError('"type_one_side" is not compatible with "exclude_types"')

# descrpt config
self.sel_r = [ 0 for ii in range(len(self.sel_a)) ]
Expand Down Expand Up @@ -552,13 +550,19 @@ def _pass_filter(self,
inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
output = []
output_qmat = []
if not self.type_one_side and type_embedding is None:
if not (self.type_one_side and len(self.exclude_types) == 0) and type_embedding is None:
for type_i in range(self.ntypes):
inputs_i = tf.slice (inputs,
[ 0, start_index* self.ndescrpt],
[-1, natoms[2+type_i]* self.ndescrpt] )
inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
layer, qmat = self._filter(inputs_i, type_i, name='filter_type_'+str(type_i)+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn)
if self.type_one_side:
# reuse NN parameters for all types to support type_one_side along with exclude_types
reuse = tf.AUTO_REUSE
filter_name = 'filter_type_all'+suffix
else:
filter_name = 'filter_type_'+str(type_i)+suffix
layer, qmat = self._filter(inputs_i, type_i, name=filter_name, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn)
layer = tf.reshape(layer, [tf.shape(inputs)[0], natoms[2+type_i] * self.get_dim_out()])
qmat = tf.reshape(qmat, [tf.shape(inputs)[0], natoms[2+type_i] * self.get_dim_rot_mat_1() * 3])
output.append(layer)
Expand Down
10 changes: 8 additions & 2 deletions deepmd/descriptor/se_r.py
Original file line number Diff line number Diff line change
Expand Up @@ -443,13 +443,19 @@ def _pass_filter(self,
start_index = 0
inputs = tf.reshape(inputs, [-1, self.ndescrpt * natoms[0]])
output = []
if not self.type_one_side:
if not (self.type_one_side and len(self.exclude_types) == 0):
for type_i in range(self.ntypes):
inputs_i = tf.slice (inputs,
[ 0, start_index* self.ndescrpt],
[-1, natoms[2+type_i]* self.ndescrpt] )
inputs_i = tf.reshape(inputs_i, [-1, self.ndescrpt])
layer = self._filter_r(inputs_i, type_i, name='filter_type_'+str(type_i)+suffix, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn)
if self.type_one_side:
# reuse NN parameters for all types to support type_one_side along with exclude_types
reuse = tf.AUTO_REUSE
filter_name = 'filter_type_all'+suffix
else:
filter_name = 'filter_type_'+str(type_i)+suffix
layer = self._filter_r(inputs_i, type_i, name=filter_name, natoms=natoms, reuse=reuse, trainable = trainable, activation_fn = self.filter_activation_fn)
layer = tf.reshape(layer, [tf.shape(inputs)[0], natoms[2+type_i] * self.get_dim_out()])
output.append(layer)
start_index += natoms[2+type_i]
Expand Down
61 changes: 48 additions & 13 deletions deepmd/utils/tabulate.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import deepmd
from typing import Callable
from typing import Tuple, List
from functools import lru_cache
from scipy.special import comb
from deepmd.env import tf
from deepmd.env import op_module
Expand Down Expand Up @@ -174,7 +175,7 @@ def build(self,
xx = np.append(xx, np.array([extrapolate * upper], dtype = self.data_type))
self.nspline = int((upper - lower) / stride0 + (extrapolate * upper - upper) / stride1)
for ii in range(self.table_size):
if self.type_one_side or (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types:
if (self.type_one_side and not self._all_excluded(ii)) or (not self.type_one_side and (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types):
if self.type_one_side:
net = "filter_-1_net_" + str(ii)
else:
Expand All @@ -198,7 +199,7 @@ def build(self,
xx = np.append(xx, np.array([extrapolate * upper], dtype = self.data_type))
self.nspline = int((upper - lower) / stride0 + (extrapolate * upper - upper) / stride1)
for ii in range(self.table_size):
if self.type_one_side or (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types:
if (self.type_one_side and not self._all_excluded(ii)) or (not self.type_one_side and (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types):
if self.type_one_side:
net = "filter_-1_net_" + str(ii)
else:
Expand Down Expand Up @@ -249,8 +250,11 @@ def _get_bias(self):
if isinstance(self.descrpt, deepmd.descriptor.DescrptSeA):
if self.type_one_side:
for ii in range(0, self.ntypes):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/bias_{layer}_{ii}"]
bias["layer_" + str(layer)].append(tf.make_ndarray(node))
if not self._all_excluded(ii):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/bias_{layer}_{ii}"]
bias["layer_" + str(layer)].append(tf.make_ndarray(node))
else:
bias["layer_" + str(layer)].append(np.array([]))
else:
for ii in range(0, self.ntypes * self.ntypes):
if (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types:
Expand All @@ -266,8 +270,11 @@ def _get_bias(self):
elif isinstance(self.descrpt, deepmd.descriptor.DescrptSeR):
if self.type_one_side:
for ii in range(0, self.ntypes):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/bias_{layer}_{ii}"]
bias["layer_" + str(layer)].append(tf.make_ndarray(node))
if not self._all_excluded(ii):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/bias_{layer}_{ii}"]
bias["layer_" + str(layer)].append(tf.make_ndarray(node))
else:
bias["layer_" + str(layer)].append(np.array([]))
else:
for ii in range(0, self.ntypes * self.ntypes):
if (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types:
Expand All @@ -286,8 +293,11 @@ def _get_matrix(self):
if isinstance(self.descrpt, deepmd.descriptor.DescrptSeA):
if self.type_one_side:
for ii in range(0, self.ntypes):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/matrix_{layer}_{ii}"]
matrix["layer_" + str(layer)].append(tf.make_ndarray(node))
if not self._all_excluded(ii):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/matrix_{layer}_{ii}"]
matrix["layer_" + str(layer)].append(tf.make_ndarray(node))
else:
matrix["layer_" + str(layer)].append(np.array([]))
else:
for ii in range(0, self.ntypes * self.ntypes):
if (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types:
Expand All @@ -303,8 +313,11 @@ def _get_matrix(self):
elif isinstance(self.descrpt, deepmd.descriptor.DescrptSeR):
if self.type_one_side:
for ii in range(0, self.ntypes):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/matrix_{layer}_{ii}"]
matrix["layer_" + str(layer)].append(tf.make_ndarray(node))
if not self._all_excluded(ii):
node = self.embedding_net_nodes[f"filter_type_all{self.suffix}/matrix_{layer}_{ii}"]
matrix["layer_" + str(layer)].append(tf.make_ndarray(node))
else:
matrix["layer_" + str(layer)].append(np.array([]))
else:
for ii in range(0, self.ntypes * self.ntypes):
if (ii // self.ntypes, ii % self.ntypes) not in self.exclude_types:
Expand Down Expand Up @@ -411,16 +424,38 @@ def _get_layer_size(self):
if isinstance(self.descrpt, deepmd.descriptor.DescrptSeA):
layer_size = len(self.embedding_net_nodes) // ((self.ntypes * self.ntypes - len(self.exclude_types)) * 2)
if self.type_one_side :
layer_size = len(self.embedding_net_nodes) // (self.ntypes * 2)
layer_size = len(self.embedding_net_nodes) // ((self.ntypes - self._n_all_excluded) * 2)
elif isinstance(self.descrpt, deepmd.descriptor.DescrptSeT):
layer_size = len(self.embedding_net_nodes) // int(comb(self.ntypes + 1, 2) * 2)
elif isinstance(self.descrpt, deepmd.descriptor.DescrptSeR):
layer_size = len(self.embedding_net_nodes) // ((self.ntypes * self.ntypes - len(self.exclude_types)) * 2)
if self.type_one_side :
layer_size = len(self.embedding_net_nodes) // (self.ntypes * 2)
layer_size = len(self.embedding_net_nodes) // ((self.ntypes - self._n_all_excluded) * 2)
else:
raise RuntimeError("Unsupported descriptor")
return layer_size

@property
@lru_cache()
def _n_all_excluded(self) -> int:
"""Then number of types excluding all types."""
return sum((int(self._all_excluded(ii)) for ii in range(0, self.ntypes)))

@lru_cache()
def _all_excluded(self, ii: int) -> bool:
"""Check if type ii excluds all types.

Parameters
----------
ii : int
type index

Returns
-------
bool
if type ii excluds all types
"""
return all([(ii, type_i) in self.exclude_types for type_i in range(self.ntypes)])

def _get_table_size(self):
table_size = 0
Expand Down Expand Up @@ -448,4 +483,4 @@ def _get_last_layer_size(self):
for item in self.matrix["layer_" + str(self.layer_size)]:
if len(item) != 0:
return item.shape[1]
return 0
return 0
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
import os,sys,platform,shutil,dpdata,json
import numpy as np
import unittest
import subprocess as sp

from deepmd.infer import DeepPot
from deepmd.env import MODEL_VERSION
# from deepmd.entrypoints.compress import compress
from common import j_loader, tests_path

from deepmd.env import GLOBAL_NP_FLOAT_PRECISION
# Single precision accumulates more rounding error, so compare the original
# and compressed models to fewer decimal places in that build.
if GLOBAL_NP_FLOAT_PRECISION == np.float32 :
    default_places = 4
else :
    default_places = 10

def _file_delete(file) :
if os.path.isdir(file):
os.rmdir(file)
elif os.path.isfile(file):
os.remove(file)

def _subprocess_run(command):
popen = sp.Popen(command.split(), shell=False, stdout=sp.PIPE, stderr=sp.STDOUT)
for line in iter(popen.stdout.readline, b''):
if hasattr(line, 'decode'):
line = line.decode('utf-8')
line = line.rstrip()
print(line)
popen.wait()
return popen.returncode

def _init_models():
    """Train, freeze, and compress a model with ``type_one_side`` plus
    ``exclude_types`` enabled; return the input json path, the frozen
    model path, and the compressed model path.
    """
    data_file = str(tests_path / os.path.join("model_compression", "data"))
    frozen_model = str(tests_path / "dp-original-type-one-side-exclude-types.pb")
    compressed_model = str(tests_path / "dp-compressed-type-one-side-exclude-types.pb")
    INPUT = str(tests_path / "input.json")

    # rewrite the stock input: point both data sections at the shared data
    # set and switch on the two options under test
    jdata = j_loader(str(tests_path / os.path.join("model_compression", "input.json")))
    for section in ("training_data", "validation_data"):
        jdata["training"][section]["systems"] = data_file
    descriptor = jdata["model"]["descriptor"]
    descriptor["type_one_side"] = True
    descriptor["exclude_types"] = [[0, 0]]
    with open(INPUT, "w") as fp:
        json.dump(jdata, fp, indent=4)

    # the three stages must run in order; abort on the first failure
    for command, message in (
        ("dp train " + INPUT, 'DP train failed!'),
        ("dp freeze -o " + frozen_model, 'DP freeze failed!'),
        ("dp compress " + " -i " + frozen_model + " -o " + compressed_model, 'DP model compression failed!'),
    ):
        ret = _subprocess_run(command)
        np.testing.assert_equal(ret, 0, message)
    return INPUT, frozen_model, compressed_model

INPUT, FROZEN_MODEL, COMPRESSED_MODEL = _init_models()

class TestDeepPotAPBCTypeOneSideExcludeTypes(unittest.TestCase) :
    """Verify that the compressed model reproduces the original frozen
    model's predictions when ``type_one_side`` is combined with
    ``exclude_types``.
    """

    @classmethod
    def setUpClass(cls):
        # idiomatic classmethod signature: first argument is the class (`cls`)
        cls.dp_original = DeepPot(FROZEN_MODEL)
        cls.dp_compressed = DeepPot(COMPRESSED_MODEL)
        # two water molecules (types: O, H, H, O, H, H) in a periodic cubic box
        cls.coords = np.array([12.83, 2.56, 2.18,
                               12.09, 2.87, 2.74,
                               00.25, 3.32, 1.68,
                               3.36, 3.00, 1.81,
                               3.51, 2.51, 2.60,
                               4.27, 3.22, 1.56])
        cls.atype = [0, 1, 1, 0, 1, 1]
        cls.box = np.array([13., 0., 0., 0., 13., 0., 0., 0., 13.])

    def test_attrs(self):
        """Both models must expose identical metadata."""
        self.assertEqual(self.dp_original.get_ntypes(), 2)
        self.assertAlmostEqual(self.dp_original.get_rcut(), 6.0, places = default_places)
        self.assertEqual(self.dp_original.get_type_map(), ['O', 'H'])
        self.assertEqual(self.dp_original.get_dim_fparam(), 0)
        self.assertEqual(self.dp_original.get_dim_aparam(), 0)

        self.assertEqual(self.dp_compressed.get_ntypes(), 2)
        self.assertAlmostEqual(self.dp_compressed.get_rcut(), 6.0, places = default_places)
        self.assertEqual(self.dp_compressed.get_type_map(), ['O', 'H'])
        self.assertEqual(self.dp_compressed.get_dim_fparam(), 0)
        self.assertEqual(self.dp_compressed.get_dim_aparam(), 0)

    def test_1frame(self):
        """Single frame, no atomic quantities: shapes and values must match."""
        ee0, ff0, vv0 = self.dp_original.eval(self.coords, self.box, self.atype, atomic = False)
        ee1, ff1, vv1 = self.dp_compressed.eval(self.coords, self.box, self.atype, atomic = False)
        # check shape of the returns
        nframes = 1
        natoms = len(self.atype)
        self.assertEqual(ee0.shape, (nframes,1))
        self.assertEqual(ff0.shape, (nframes,natoms,3))
        self.assertEqual(vv0.shape, (nframes,9))
        self.assertEqual(ee1.shape, (nframes,1))
        self.assertEqual(ff1.shape, (nframes,natoms,3))
        self.assertEqual(vv1.shape, (nframes,9))
        # check values: compressed output must agree with the original
        np.testing.assert_almost_equal(ff0, ff1, default_places)
        np.testing.assert_almost_equal(ee0, ee1, default_places)
        np.testing.assert_almost_equal(vv0, vv1, default_places)

    def test_1frame_atm(self):
        """Single frame with per-atom energies and virials."""
        ee0, ff0, vv0, ae0, av0 = self.dp_original.eval(self.coords, self.box, self.atype, atomic = True)
        ee1, ff1, vv1, ae1, av1 = self.dp_compressed.eval(self.coords, self.box, self.atype, atomic = True)
        # check shape of the returns
        nframes = 1
        natoms = len(self.atype)
        self.assertEqual(ee0.shape, (nframes,1))
        self.assertEqual(ff0.shape, (nframes,natoms,3))
        self.assertEqual(vv0.shape, (nframes,9))
        self.assertEqual(ae0.shape, (nframes,natoms,1))
        self.assertEqual(av0.shape, (nframes,natoms,9))
        self.assertEqual(ee1.shape, (nframes,1))
        self.assertEqual(ff1.shape, (nframes,natoms,3))
        self.assertEqual(vv1.shape, (nframes,9))
        self.assertEqual(ae1.shape, (nframes,natoms,1))
        self.assertEqual(av1.shape, (nframes,natoms,9))
        # check values
        np.testing.assert_almost_equal(ff0, ff1, default_places)
        np.testing.assert_almost_equal(ae0, ae1, default_places)
        np.testing.assert_almost_equal(av0, av1, default_places)
        np.testing.assert_almost_equal(ee0, ee1, default_places)
        np.testing.assert_almost_equal(vv0, vv1, default_places)

    def test_2frame_atm(self):
        """Two identical frames with per-atom quantities."""
        coords2 = np.concatenate((self.coords, self.coords))
        box2 = np.concatenate((self.box, self.box))
        ee0, ff0, vv0, ae0, av0 = self.dp_original.eval(coords2, box2, self.atype, atomic = True)
        ee1, ff1, vv1, ae1, av1 = self.dp_compressed.eval(coords2, box2, self.atype, atomic = True)
        # check shape of the returns
        nframes = 2
        natoms = len(self.atype)
        self.assertEqual(ee0.shape, (nframes,1))
        self.assertEqual(ff0.shape, (nframes,natoms,3))
        self.assertEqual(vv0.shape, (nframes,9))
        self.assertEqual(ae0.shape, (nframes,natoms,1))
        self.assertEqual(av0.shape, (nframes,natoms,9))
        self.assertEqual(ee1.shape, (nframes,1))
        self.assertEqual(ff1.shape, (nframes,natoms,3))
        self.assertEqual(vv1.shape, (nframes,9))
        self.assertEqual(ae1.shape, (nframes,natoms,1))
        self.assertEqual(av1.shape, (nframes,natoms,9))

        # check values
        np.testing.assert_almost_equal(ff0, ff1, default_places)
        np.testing.assert_almost_equal(ae0, ae1, default_places)
        np.testing.assert_almost_equal(av0, av1, default_places)
        np.testing.assert_almost_equal(ee0, ee1, default_places)
        np.testing.assert_almost_equal(vv0, vv1, default_places)
Loading