From b7772f9af37b727e0577530cbba6e9205296afd5 Mon Sep 17 00:00:00 2001
From: Luke Hutton
Date: Mon, 1 Nov 2021 18:07:55 +0000
Subject: [PATCH] [TVMC] Keep quantized weights when importing PyTorch model

BYOC requires that `keep_quantized_weight` be set to True when
converting quantized PyTorch models with `from_pytorch`. This change
sets it to True when importing PyTorch models through TVMC.

Change-Id: I8c183f9f802ea54d24679a4017e56481d84e5655
---
 python/tvm/driver/tvmc/frontends.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/tvm/driver/tvmc/frontends.py b/python/tvm/driver/tvmc/frontends.py
index 21d3d59fb013..13ab3dd170c3 100644
--- a/python/tvm/driver/tvmc/frontends.py
+++ b/python/tvm/driver/tvmc/frontends.py
@@ -262,7 +262,9 @@ def load(self, path, shape_dict=None, **kwargs):
         input_shapes = list(shape_dict.items())
 
         logger.debug("parse Torch model and convert into Relay computation graph")
-        return relay.frontend.from_pytorch(traced_model, input_shapes, **kwargs)
+        return relay.frontend.from_pytorch(
+            traced_model, input_shapes, keep_quantized_weight=True, **kwargs
+        )
 
 
 class PaddleFrontend(Frontend):
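
Note (not part of the patch): for context, below is a minimal sketch of the
direct Relay call that TVMC now effectively makes when importing a quantized
PyTorch model. `MyQuantizedModel` and the input name "input0" are placeholders
for illustration, not code from the TVM repository:

    import torch
    from tvm import relay

    # Placeholder: any torch.jit-traceable quantized module works here.
    model = MyQuantizedModel().eval()
    example_input = torch.rand(1, 3, 224, 224)
    traced_model = torch.jit.trace(model, example_input)

    # keep_quantized_weight=True keeps the weight tensors in their quantized
    # form rather than converting them back to float, which is what BYOC
    # backends consuming pre-quantized models expect.
    mod, params = relay.frontend.from_pytorch(
        traced_model,
        [("input0", list(example_input.shape))],
        keep_quantized_weight=True,
    )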