Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 6 additions & 35 deletions deepmd/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,12 @@
"float64": tf.float64,
}

# Map integer tensor-dtype codes to numpy scalar types.
# NOTE(review): the keys look like TensorFlow proto DataType enum values
# (1 = DT_FLOAT, 2 = DT_DOUBLE, 19 = DT_HALF) — confirm against
# tensorflow.core.framework.types_pb2 before relying on this.
PRECISION_MAPPING: Dict[int, type] = {
    1: np.float32,
    2: np.float64,
    19: np.float16,
}


def gelu(x: tf.Tensor) -> tf.Tensor:
"""Gaussian Error Linear Unit.
Expand Down Expand Up @@ -485,38 +491,3 @@ def get_np_precision(precision: "_PRECISION") -> np.dtype:
return np.float64
else:
raise RuntimeError(f"{precision} is not a valid precision")


def get_tensor_by_name(model_file: str,
                       tensor_name: str) -> tf.Tensor:
    """Load a tensor's value from a frozen model file.

    Parameters
    ----------
    model_file : str
        Path to the input frozen model (a serialized GraphDef).
    tensor_name : str
        Name of the tensor to load from the frozen model, without the
        ":0" output-index suffix (it is appended here).

    Returns
    -------
    tf.Tensor
        The evaluated value of the named tensor.

    Raises
    ------
    GraphWithoutTensorError
        If the frozen model does not contain a tensor with this name.
    """
    graph_def = tf.GraphDef()
    # Parse the serialized GraphDef out of the frozen-model file.
    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="")
        try:
            # ":0" selects the first output of the named operation.
            tensor = graph.get_tensor_by_name(tensor_name + ":0")
        except KeyError as e:
            raise GraphWithoutTensorError() from e
    # Evaluate the tensor in a fresh session to obtain its concrete value.
    with tf.Session(graph=graph) as sess:
        tensor = run_sess(sess, tensor)
    return tensor
73 changes: 69 additions & 4 deletions deepmd/descriptor/se_a.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import math
import numpy as np
from typing import Tuple, List
from typing import Tuple, List, Dict, Any

from deepmd.env import tf
from deepmd.common import get_activation_func, get_precision, ACTIVATION_FN_DICT, PRECISION_DICT, docstring_parameter, get_np_precision
Expand All @@ -10,7 +10,7 @@
from deepmd.env import op_module
from deepmd.env import default_tf_session_config
from deepmd.utils.network import embedding_net, embedding_net_rand_seed_shift
from deepmd.utils.tabulate import DeepTabulate
from deepmd.utils.tabulate import DPTabulate
from deepmd.utils.type_embed import embed_atom_type
from deepmd.utils.sess import run_sess

Expand Down Expand Up @@ -267,15 +267,15 @@ def enable_compression(self,
The overflow check frequency
"""
self.compress = True
self.model_file = model_file
self.table = DPTabulate(model_file, self.type_one_side, self.exclude_types)
self.table_config = [table_extrapolate, table_stride_1, table_stride_2, check_frequency]
self.table = DeepTabulate(self.model_file, self.type_one_side, self.exclude_types)
self.lower, self.upper \
= self.table.build(min_nbor_dist,
table_extrapolate,
table_stride_1,
table_stride_2)


def build (self,
coord_ : tf.Tensor,
atype_ : tf.Tensor,
Expand Down Expand Up @@ -392,6 +392,71 @@ def get_rot_mat(self) -> tf.Tensor:
"""
return self.qmat

def pass_tensors_from_frz_model(self,
                                descrpt_reshape : tf.Tensor,
                                descrpt_deriv : tf.Tensor,
                                rij : tf.Tensor,
                                nlist : tf.Tensor
                                ):
    """Adopt descriptor tensors taken from a frozen-model graph.

    Stores the supplied tensors on this descriptor instance so that
    later build steps reuse them instead of recomputing.

    Parameters
    ----------
    descrpt_reshape
        The passed descrpt_reshape tensor.
    descrpt_deriv
        The passed descrpt_deriv tensor.
    rij
        The passed rij tensor.
    nlist
        The passed nlist tensor.
    """
    # The four assignments are independent; store them in argument order.
    self.descrpt_reshape = descrpt_reshape
    self.descrpt_deriv = descrpt_deriv
    self.rij = rij
    self.nlist = nlist
def get_feed_dict(self,
                  coord_,
                  atype_,
                  natoms,
                  box,
                  mesh):
    """Generate the feed_dict for the current descriptor.

    Parameters
    ----------
    coord_
        The coordinates of the atoms.
    atype_
        The types of the atoms.
    natoms
        The number of atoms; this tensor has length Ntypes + 2.
        natoms[0]: number of local atoms.
        natoms[1]: total number of atoms held by this processor.
        natoms[i]: 2 <= i < Ntypes+2, number of type i atoms.
    box
        The box. Can be generated by deepmd.model.make_stat_input.
    mesh
        For historical reasons, only the length of the tensor matters:
        size 6 means pbc is assumed, size 0 means no pbc.

    Returns
    -------
    feed_dict
        Mapping from placeholder tensor names to the supplied values.
    """
    # Pair each graph placeholder name with its corresponding input.
    placeholder_names = ('t_coord:0', 't_type:0', 't_natoms:0',
                         't_box:0', 't_mesh:0')
    placeholder_values = (coord_, atype_, natoms, box, mesh)
    return dict(zip(placeholder_names, placeholder_values))

def prod_force_virial(self,
atom_ener : tf.Tensor,
Expand Down
8 changes: 5 additions & 3 deletions deepmd/entrypoints/compress.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,11 @@
from typing import Optional

from deepmd.env import tf
from deepmd.common import j_loader, get_tensor_by_name, GLOBAL_TF_FLOAT_PRECISION
from deepmd.common import j_loader, GLOBAL_TF_FLOAT_PRECISION
from deepmd.utils.argcheck import normalize
from deepmd.utils.compat import updata_deepmd_input
from deepmd.utils.errors import GraphTooLargeError, GraphWithoutTensorError
from deepmd.utils.graph import get_tensor_by_name

from .freeze import freeze
from .train import train, get_rcut, get_min_nbor_dist
Expand Down Expand Up @@ -77,9 +78,9 @@ def compress(
"Please consider using the --training-script command within the model compression interface to provide the training script of the input frozen model. "
"Note that the input training script must contain the correct path to the training data." % input
) from e
elif os.path.exists(training_script) == False:
elif not os.path.exists(training_script):
raise RuntimeError(
"The input training script %s does not exist! Please check the path of the training script. " % (input + "(" + os.path.abspath(input) + ")")
"The input training script %s (%s) does not exist! Please check the path of the training script. " % (input, os.path.abspath(input))
) from e
else:
log.info("stage 0: compute the min_nbor_dist")
Expand Down Expand Up @@ -121,6 +122,7 @@ def compress(
INPUT=control_file,
init_model=None,
restart=None,
init_frz_model=None,
output=control_file,
mpi_log=mpi_log,
log_level=log_level,
Expand Down
1 change: 1 addition & 0 deletions deepmd/entrypoints/freeze.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ def _make_node_names(model_type: str, modifier_type: Optional[str] = None) -> Li
if unknown model type
"""
nodes = [
"model_type",
"descrpt_attr/rcut",
"descrpt_attr/ntypes",
"model_attr/tmap",
Expand Down
7 changes: 7 additions & 0 deletions deepmd/entrypoints/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,13 @@ def parse_args(args: Optional[List[str]] = None):
default="out.json",
help="The output file of the parameters used in training.",
)
parser_train.add_argument(
"-f",
"--init-frz-model",
type=str,
default=None,
help="Initialize the training from the frozen model.",
)

# * freeze script ******************************************************************
parser_frz = subparsers.add_parser(
Expand Down
12 changes: 8 additions & 4 deletions deepmd/entrypoints/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ def train(
init_model: Optional[str],
restart: Optional[str],
output: str,
init_frz_model: str,
mpi_log: str,
log_level: int,
log_path: Optional[str],
Expand All @@ -50,13 +51,15 @@ def train(
path to checkpoint folder or None
output : str
path for dump file with arguments
init_frz_model : str
path to frozen model or None
mpi_log : str
mpi logging mode
log_level : int
logging level defined by int 0-3
log_path : Optional[str]
logging file path or None if logs are to be output only to stdout
is_compress: Bool
is_compress: bool
indicates whether in the model compress mode

Raises
Expand All @@ -71,7 +74,7 @@ def train(

jdata = normalize(jdata)

if is_compress == False:
if not is_compress:
jdata = update_sel(jdata)

with open(output, "w") as fp:
Expand All @@ -84,6 +87,7 @@ def train(
run_opt = RunOptions(
init_model=init_model,
restart=restart,
init_frz_model=init_frz_model,
log_path=log_path,
log_level=log_level,
mpi_log=mpi_log
Expand Down Expand Up @@ -141,7 +145,7 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = Fal
# decouple the training data from the model compress process
train_data = None
valid_data = None
if is_compress == False:
if not is_compress:
# init data
train_data = get_data(jdata["training"]["training_data"], rcut, ipt_type_map, modifier)
train_data.print_summary("training")
Expand All @@ -153,7 +157,7 @@ def _do_work(jdata: Dict[str, Any], run_opt: RunOptions, is_compress: bool = Fal
stop_batch = j_must_have(jdata["training"], "numb_steps")
model.build(train_data, stop_batch)

if is_compress == False:
if not is_compress:
# train the model with the provided systems in a cyclic way
start_time = time.time()
model.train(train_data, valid_data)
Expand Down
7 changes: 1 addition & 6 deletions deepmd/entrypoints/transfer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

from typing import Dict, Optional, Sequence, Tuple
from deepmd.env import tf
from deepmd.common import PRECISION_MAPPING
import re
import numpy as np
import logging
Expand All @@ -10,12 +11,6 @@

log = logging.getLogger(__name__)

PRECISION_MAPPING: Dict[int, type] = {
1: np.float32,
2: np.float64,
19: np.float16,
}


@np.vectorize
def convert_number(number: int) -> float:
Expand Down
25 changes: 22 additions & 3 deletions deepmd/fit/ener.py
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,9 @@ def __init__ (self,
self.aparam_std = None
self.aparam_inv_std = None

self.compress = False
self.fitting_net_variables = None

def get_numb_fparam(self) -> int:
"""
Get the number of frame parameters
Expand Down Expand Up @@ -257,7 +260,8 @@ def _build_lower(
activation_fn = self.fitting_activation_fn,
precision = self.fitting_precision,
trainable = self.trainable[ii],
uniform_seed = self.uniform_seed)
uniform_seed = self.uniform_seed,
initial_variables = self.fitting_net_variables)
else :
layer = one_layer(
layer,
Expand All @@ -268,7 +272,8 @@ def _build_lower(
activation_fn = self.fitting_activation_fn,
precision = self.fitting_precision,
trainable = self.trainable[ii],
uniform_seed = self.uniform_seed)
uniform_seed = self.uniform_seed,
initial_variables = self.fitting_net_variables)
if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
final_layer = one_layer(
layer,
Expand All @@ -280,7 +285,8 @@ def _build_lower(
seed = self.seed,
precision = self.fitting_precision,
trainable = self.trainable[-1],
uniform_seed = self.uniform_seed)
uniform_seed = self.uniform_seed,
initial_variables = self.fitting_net_variables)
if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift

return final_layer
Expand Down Expand Up @@ -445,3 +451,16 @@ def build (self,
return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION)


def init_variables(self,
                   fitting_net_variables: dict
                   ) -> None:
    """Initialize the fitting net from a dict of pre-trained variables.

    Parameters
    ----------
    fitting_net_variables
        Dict holding the fitting-net variable values; stored so the
        network layers can be built from them later.
    """
    # Keep the variables around and flag that compression mode is active.
    self.fitting_net_variables = fitting_net_variables
    self.compress = True
Loading