From 9db661be5ef3a5edcab263a360ae282a57245a27 Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Thu, 23 Apr 2020 14:52:41 -0700 Subject: [PATCH 01/10] Build ci cpu image with TFLITE ON --- docker/Dockerfile.ci_cpu | 4 ++++ docker/install/ubuntu_install_tflite.sh | 12 +++++++++--- tests/scripts/task_config_build_cpu.sh | 3 +++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile.ci_cpu b/docker/Dockerfile.ci_cpu index 10c8c62d970b..82663701eab2 100644 --- a/docker/Dockerfile.ci_cpu +++ b/docker/Dockerfile.ci_cpu @@ -63,3 +63,7 @@ RUN bash /install/ubuntu_install_antlr.sh # Chisel deps for TSIM COPY install/ubuntu_install_chisel.sh /install/ubuntu_install_chisel.sh RUN bash /install/ubuntu_install_chisel.sh + +# TFLite deps +COPY install/ubuntu_install_tflite.sh /install/ubuntu_install_tflite.sh +RUN bash /install/ubuntu_install_tflite.sh diff --git a/docker/install/ubuntu_install_tflite.sh b/docker/install/ubuntu_install_tflite.sh index 49b0f2badf82..3a34db98b9a7 100755 --- a/docker/install/ubuntu_install_tflite.sh +++ b/docker/install/ubuntu_install_tflite.sh @@ -6,9 +6,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -26,12 +26,18 @@ cd flatbuffers cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release make install -j8 cd .. -rm -rf flatbuffers # Install flatbuffers python packages. pip3 install flatbuffers pip2 install flatbuffers +# Build the TFLite static library, necessary for building with TFLite ON. +# The library is built at: +# tensorflow/tensorflow/lite/tools/make/gen/*/lib/libtensorflow-lite.a. 
+git clone https://github.com/tensorflow/tensorflow --branch=r2.0 +./tensorflow/tensorflow/lite/tools/make/download_dependencies.sh +./tensorflow/tensorflow/lite/tools/make/build_lib.sh + # Setup tflite from schema mkdir tflite cd tflite diff --git a/tests/scripts/task_config_build_cpu.sh b/tests/scripts/task_config_build_cpu.sh index 912e59eb0330..72a41f59d3b1 100755 --- a/tests/scripts/task_config_build_cpu.sh +++ b/tests/scripts/task_config_build_cpu.sh @@ -38,3 +38,6 @@ echo set\(CMAKE_CXX_FLAGS -Werror\) >> config.cmake echo set\(HIDE_PRIVATE_SYMBOLS ON\) >> config.cmake echo set\(USE_VTA_TSIM ON\) >> config.cmake echo set\(USE_VTA_FSIM ON\) >> config.cmake +echo set\(USE_TFLITE ON\) >> config.cmake +echo set\(USE_TENSORFLOW_PATH \"/tensorflow\"\) >> config.cmake +echo set\(USE_FLATBUFFERS_PATH \"/flatbuffers\"\) >> config.cmake \ No newline at end of file From 5c4d0899f8060572a27f5c30a15cb5c12f9303c6 Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Thu, 23 Apr 2020 16:11:33 -0700 Subject: [PATCH 02/10] Enable test for tflite remote execution via rpc The check_local() test is flaky, something to do with how we retrieve outputs from the tflite interpreter. Leaving it commented out for now. 
--- tests/python/contrib/test_tflite_runtime.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py index 8c883b031a89..a6198a7e4852 100644 --- a/tests/python/contrib/test_tflite_runtime.py +++ b/tests/python/contrib/test_tflite_runtime.py @@ -19,8 +19,7 @@ import numpy as np from tvm import rpc from tvm.contrib import util, tflite_runtime -# import tensorflow as tf -# import tflite_runtime.interpreter as tflite +import tensorflow as tf def skipped_test_tflite_runtime(): @@ -45,7 +44,7 @@ def check_local(): open(tflite_model_path, 'wb').write(tflite_model) # inference via tflite interpreter python apis - interpreter = tflite.Interpreter(model_path=tflite_model_path) + interpreter = tf.lite.Interpreter(model_path=tflite_model_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() @@ -73,7 +72,7 @@ def check_remote(): open(tflite_model_path, 'wb').write(tflite_model) # inference via tflite interpreter python apis - interpreter = tflite.Interpreter(model_path=tflite_model_path) + interpreter = tf.lite.Interpreter(model_path=tflite_model_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() @@ -97,9 +96,8 @@ def check_remote(): out = runtime.get_output(0) np.testing.assert_equal(out.asnumpy(), tflite_output) - check_local() + # check_local() check_remote() if __name__ == "__main__": - # skipped_test_tflite_runtime() - pass + skipped_test_tflite_runtime() From 77cac8dabd655fbfe4627627c0a4a001cc46f187 Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Thu, 23 Apr 2020 17:51:57 -0700 Subject: [PATCH 03/10] Add tensorflow dep to ci cpu image --- docker/Dockerfile.ci_cpu | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/Dockerfile.ci_cpu b/docker/Dockerfile.ci_cpu index 
82663701eab2..9be9cab1fd1f 100644 --- a/docker/Dockerfile.ci_cpu +++ b/docker/Dockerfile.ci_cpu @@ -67,3 +67,7 @@ RUN bash /install/ubuntu_install_chisel.sh # TFLite deps COPY install/ubuntu_install_tflite.sh /install/ubuntu_install_tflite.sh RUN bash /install/ubuntu_install_tflite.sh + +# TensorFlow deps +COPY install/ubuntu_install_tensorflow.sh /install/ubuntu_install_tensorflow.sh +RUN bash /install/ubuntu_install_tensorflow.sh \ No newline at end of file From 47b47b1ff9d36cb7e28a22a840b23920ea512b0d Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Thu, 23 Apr 2020 17:52:54 -0700 Subject: [PATCH 04/10] Make tflite unit tests findable by pytest --- tests/python/contrib/test_tflite_runtime.py | 156 ++++++++++---------- 1 file changed, 78 insertions(+), 78 deletions(-) diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py index a6198a7e4852..6aff34473b60 100644 --- a/tests/python/contrib/test_tflite_runtime.py +++ b/tests/python/contrib/test_tflite_runtime.py @@ -14,90 +14,90 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
+import pytest +import tensorflow as tf + import tvm from tvm import te import numpy as np from tvm import rpc from tvm.contrib import util, tflite_runtime -import tensorflow as tf -def skipped_test_tflite_runtime(): - - def create_tflite_model(): - root = tf.Module() - root.const = tf.constant([1., 2.], tf.float32) - root.f = tf.function(lambda x: root.const * x) - - input_signature = tf.TensorSpec(shape=[2, ], dtype=tf.float32) - concrete_func = root.f.get_concrete_function(input_signature) - converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) - tflite_model = converter.convert() - return tflite_model - - - def check_local(): - tflite_fname = "model.tflite" - tflite_model = create_tflite_model() - temp = util.tempdir() - tflite_model_path = temp.relpath(tflite_fname) - open(tflite_model_path, 'wb').write(tflite_model) - - # inference via tflite interpreter python apis - interpreter = tf.lite.Interpreter(model_path=tflite_model_path) - interpreter.allocate_tensors() - input_details = interpreter.get_input_details() - output_details = interpreter.get_output_details() - - input_shape = input_details[0]['shape'] - tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) - interpreter.set_tensor(input_details[0]['index'], tflite_input) - interpreter.invoke() - tflite_output = interpreter.get_tensor(output_details[0]['index']) - - # inference via tvm tflite runtime - with open(tflite_model_path, 'rb') as model_fin: - runtime = tflite_runtime.create(model_fin.read(), tvm.cpu(0)) - runtime.set_input(0, tvm.nd.array(tflite_input)) - runtime.invoke() - out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) - - - def check_remote(): - tflite_fname = "model.tflite" - tflite_model = create_tflite_model() - temp = util.tempdir() - tflite_model_path = temp.relpath(tflite_fname) - open(tflite_model_path, 'wb').write(tflite_model) - - # inference via tflite interpreter python apis - interpreter = 
tf.lite.Interpreter(model_path=tflite_model_path) - interpreter.allocate_tensors() - input_details = interpreter.get_input_details() - output_details = interpreter.get_output_details() - - input_shape = input_details[0]['shape'] - tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) - interpreter.set_tensor(input_details[0]['index'], tflite_input) - interpreter.invoke() - tflite_output = interpreter.get_tensor(output_details[0]['index']) - - # inference via remote tvm tflite runtime - server = rpc.Server("localhost") - remote = rpc.connect(server.host, server.port) - ctx = remote.cpu(0) - a = remote.upload(tflite_model_path) - - with open(tflite_model_path, 'rb') as model_fin: - runtime = tflite_runtime.create(model_fin.read(), remote.cpu(0)) - runtime.set_input(0, tvm.nd.array(tflite_input, remote.cpu(0))) - runtime.invoke() - out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) - - # check_local() - check_remote() +def _create_tflite_model(): + root = tf.Module() + root.const = tf.constant([1., 2.], tf.float32) + root.f = tf.function(lambda x: root.const * x) + + input_signature = tf.TensorSpec(shape=[2, ], dtype=tf.float32) + concrete_func = root.f.get_concrete_function(input_signature) + converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) + tflite_model = converter.convert() + return tflite_model + + +@pytest.mark.skip('skip because getting outputs is flakey') +def test_local(): + tflite_fname = "model.tflite" + tflite_model = _create_tflite_model() + temp = util.tempdir() + tflite_model_path = temp.relpath(tflite_fname) + open(tflite_model_path, 'wb').write(tflite_model) + + # inference via tflite interpreter python apis + interpreter = tf.lite.Interpreter(model_path=tflite_model_path) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + input_shape = input_details[0]['shape'] + tflite_input = 
np.array(np.random.random_sample(input_shape), dtype=np.float32) + interpreter.set_tensor(input_details[0]['index'], tflite_input) + interpreter.invoke() + tflite_output = interpreter.get_tensor(output_details[0]['index']) + + # inference via tvm tflite runtime + with open(tflite_model_path, 'rb') as model_fin: + runtime = tflite_runtime.create(model_fin.read(), tvm.cpu(0)) + runtime.set_input(0, tvm.nd.array(tflite_input)) + runtime.invoke() + out = runtime.get_output(0) + np.testing.assert_equal(out.asnumpy(), tflite_output) + + +def test_remote(): + tflite_fname = "model.tflite" + tflite_model = _create_tflite_model() + temp = util.tempdir() + tflite_model_path = temp.relpath(tflite_fname) + open(tflite_model_path, 'wb').write(tflite_model) + + # inference via tflite interpreter python apis + interpreter = tf.lite.Interpreter(model_path=tflite_model_path) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + input_shape = input_details[0]['shape'] + tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) + interpreter.set_tensor(input_details[0]['index'], tflite_input) + interpreter.invoke() + tflite_output = interpreter.get_tensor(output_details[0]['index']) + + # inference via remote tvm tflite runtime + server = rpc.Server("localhost") + remote = rpc.connect(server.host, server.port) + ctx = remote.cpu(0) + a = remote.upload(tflite_model_path) + + with open(tflite_model_path, 'rb') as model_fin: + runtime = tflite_runtime.create(model_fin.read(), remote.cpu(0)) + runtime.set_input(0, tvm.nd.array(tflite_input, remote.cpu(0))) + runtime.invoke() + out = runtime.get_output(0) + np.testing.assert_equal(out.asnumpy(), tflite_output) + if __name__ == "__main__": - skipped_test_tflite_runtime() + test_local() + test_remote() From cc4d55bd6f07b98aea8fcb2488c2746342cf1db4 Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Fri, 24 Apr 2020 08:03:35 -0700 
Subject: [PATCH 05/10] Add newlines where appropriate --- docker/Dockerfile.ci_cpu | 2 +- tests/scripts/task_config_build_cpu.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile.ci_cpu b/docker/Dockerfile.ci_cpu index 9be9cab1fd1f..abd7c0d04dab 100644 --- a/docker/Dockerfile.ci_cpu +++ b/docker/Dockerfile.ci_cpu @@ -70,4 +70,4 @@ RUN bash /install/ubuntu_install_tflite.sh # TensorFlow deps COPY install/ubuntu_install_tensorflow.sh /install/ubuntu_install_tensorflow.sh -RUN bash /install/ubuntu_install_tensorflow.sh \ No newline at end of file +RUN bash /install/ubuntu_install_tensorflow.sh diff --git a/tests/scripts/task_config_build_cpu.sh b/tests/scripts/task_config_build_cpu.sh index 72a41f59d3b1..32798f3b2c44 100755 --- a/tests/scripts/task_config_build_cpu.sh +++ b/tests/scripts/task_config_build_cpu.sh @@ -40,4 +40,4 @@ echo set\(USE_VTA_TSIM ON\) >> config.cmake echo set\(USE_VTA_FSIM ON\) >> config.cmake echo set\(USE_TFLITE ON\) >> config.cmake echo set\(USE_TENSORFLOW_PATH \"/tensorflow\"\) >> config.cmake -echo set\(USE_FLATBUFFERS_PATH \"/flatbuffers\"\) >> config.cmake \ No newline at end of file +echo set\(USE_FLATBUFFERS_PATH \"/flatbuffers\"\) >> config.cmake From 921ca28e3643f9febc6d6a3e2da0953a97d7a465 Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Fri, 24 Apr 2020 08:09:03 -0700 Subject: [PATCH 06/10] Re-word pytest skip annotation --- tests/python/contrib/test_tflite_runtime.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py index 6aff34473b60..6865b5c1f6f7 100644 --- a/tests/python/contrib/test_tflite_runtime.py +++ b/tests/python/contrib/test_tflite_runtime.py @@ -36,7 +36,7 @@ def _create_tflite_model(): return tflite_model -@pytest.mark.skip('skip because getting outputs is flakey') +@pytest.mark.skip('skip because accessing output tensor is flakey') def test_local(): tflite_fname = 
"model.tflite" tflite_model = _create_tflite_model() From 9bd7261b875ad3d6907d4815132b22f8dd72bc7f Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Fri, 24 Apr 2020 09:54:01 -0700 Subject: [PATCH 07/10] Revert test file --- tests/python/contrib/test_tflite_runtime.py | 162 ++++++++++---------- 1 file changed, 82 insertions(+), 80 deletions(-) diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py index 6865b5c1f6f7..7c4d7324f623 100644 --- a/tests/python/contrib/test_tflite_runtime.py +++ b/tests/python/contrib/test_tflite_runtime.py @@ -14,90 +14,92 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import pytest -import tensorflow as tf - import tvm from tvm import te import numpy as np from tvm import rpc from tvm.contrib import util, tflite_runtime - - -def _create_tflite_model(): - root = tf.Module() - root.const = tf.constant([1., 2.], tf.float32) - root.f = tf.function(lambda x: root.const * x) - - input_signature = tf.TensorSpec(shape=[2, ], dtype=tf.float32) - concrete_func = root.f.get_concrete_function(input_signature) - converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) - tflite_model = converter.convert() - return tflite_model - - -@pytest.mark.skip('skip because accessing output tensor is flakey') -def test_local(): - tflite_fname = "model.tflite" - tflite_model = _create_tflite_model() - temp = util.tempdir() - tflite_model_path = temp.relpath(tflite_fname) - open(tflite_model_path, 'wb').write(tflite_model) - - # inference via tflite interpreter python apis - interpreter = tf.lite.Interpreter(model_path=tflite_model_path) - interpreter.allocate_tensors() - input_details = interpreter.get_input_details() - output_details = interpreter.get_output_details() - - input_shape = input_details[0]['shape'] - tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) - 
interpreter.set_tensor(input_details[0]['index'], tflite_input) - interpreter.invoke() - tflite_output = interpreter.get_tensor(output_details[0]['index']) - - # inference via tvm tflite runtime - with open(tflite_model_path, 'rb') as model_fin: - runtime = tflite_runtime.create(model_fin.read(), tvm.cpu(0)) - runtime.set_input(0, tvm.nd.array(tflite_input)) - runtime.invoke() - out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) - - -def test_remote(): - tflite_fname = "model.tflite" - tflite_model = _create_tflite_model() - temp = util.tempdir() - tflite_model_path = temp.relpath(tflite_fname) - open(tflite_model_path, 'wb').write(tflite_model) - - # inference via tflite interpreter python apis - interpreter = tf.lite.Interpreter(model_path=tflite_model_path) - interpreter.allocate_tensors() - input_details = interpreter.get_input_details() - output_details = interpreter.get_output_details() - - input_shape = input_details[0]['shape'] - tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) - interpreter.set_tensor(input_details[0]['index'], tflite_input) - interpreter.invoke() - tflite_output = interpreter.get_tensor(output_details[0]['index']) - - # inference via remote tvm tflite runtime - server = rpc.Server("localhost") - remote = rpc.connect(server.host, server.port) - ctx = remote.cpu(0) - a = remote.upload(tflite_model_path) - - with open(tflite_model_path, 'rb') as model_fin: - runtime = tflite_runtime.create(model_fin.read(), remote.cpu(0)) - runtime.set_input(0, tvm.nd.array(tflite_input, remote.cpu(0))) - runtime.invoke() - out = runtime.get_output(0) - np.testing.assert_equal(out.asnumpy(), tflite_output) - +# import tensorflow as tf +# import tflite_runtime.interpreter as tflite + + +def skipped_test_tflite_runtime(): + + def create_tflite_model(): + root = tf.Module() + root.const = tf.constant([1., 2.], tf.float32) + root.f = tf.function(lambda x: root.const * x) + + input_signature = 
tf.TensorSpec(shape=[2, ], dtype=tf.float32) + concrete_func = root.f.get_concrete_function(input_signature) + converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) + tflite_model = converter.convert() + return tflite_model + + + def check_local(): + tflite_fname = "model.tflite" + tflite_model = create_tflite_model() + temp = util.tempdir() + tflite_model_path = temp.relpath(tflite_fname) + open(tflite_model_path, 'wb').write(tflite_model) + + # inference via tflite interpreter python apis + interpreter = tflite.Interpreter(model_path=tflite_model_path) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + input_shape = input_details[0]['shape'] + tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) + interpreter.set_tensor(input_details[0]['index'], tflite_input) + interpreter.invoke() + tflite_output = interpreter.get_tensor(output_details[0]['index']) + + # inference via tvm tflite runtime + with open(tflite_model_path, 'rb') as model_fin: + runtime = tflite_runtime.create(model_fin.read(), tvm.cpu(0)) + runtime.set_input(0, tvm.nd.array(tflite_input)) + runtime.invoke() + out = runtime.get_output(0) + np.testing.assert_equal(out.asnumpy(), tflite_output) + + + def check_remote(): + tflite_fname = "model.tflite" + tflite_model = create_tflite_model() + temp = util.tempdir() + tflite_model_path = temp.relpath(tflite_fname) + open(tflite_model_path, 'wb').write(tflite_model) + + # inference via tflite interpreter python apis + interpreter = tflite.Interpreter(model_path=tflite_model_path) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + input_shape = input_details[0]['shape'] + tflite_input = np.array(np.random.random_sample(input_shape), dtype=np.float32) + interpreter.set_tensor(input_details[0]['index'], tflite_input) + 
interpreter.invoke() + tflite_output = interpreter.get_tensor(output_details[0]['index']) + + # inference via remote tvm tflite runtime + server = rpc.Server("localhost") + remote = rpc.connect(server.host, server.port) + ctx = remote.cpu(0) + a = remote.upload(tflite_model_path) + + with open(tflite_model_path, 'rb') as model_fin: + runtime = tflite_runtime.create(model_fin.read(), remote.cpu(0)) + runtime.set_input(0, tvm.nd.array(tflite_input, remote.cpu(0))) + runtime.invoke() + out = runtime.get_output(0) + np.testing.assert_equal(out.asnumpy(), tflite_output) + + check_local() + check_remote() if __name__ == "__main__": - test_local() - test_remote() + # skipped_test_tflite_runtime() + pass \ No newline at end of file From 15230ceb37ad5d9ff3172cba1a2b32e355b7b2b1 Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Fri, 24 Apr 2020 09:56:13 -0700 Subject: [PATCH 08/10] Fix whitespace --- tests/python/contrib/test_tflite_runtime.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py index 7c4d7324f623..8c883b031a89 100644 --- a/tests/python/contrib/test_tflite_runtime.py +++ b/tests/python/contrib/test_tflite_runtime.py @@ -102,4 +102,4 @@ def check_remote(): if __name__ == "__main__": # skipped_test_tflite_runtime() - pass \ No newline at end of file + pass From 27eeed599b626a82bd665b77082d94c3dc6cc76d Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Fri, 24 Apr 2020 10:39:56 -0700 Subject: [PATCH 09/10] Set TFLite off in cpu config --- tests/scripts/task_config_build_cpu.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/scripts/task_config_build_cpu.sh b/tests/scripts/task_config_build_cpu.sh index 32798f3b2c44..912e59eb0330 100755 --- a/tests/scripts/task_config_build_cpu.sh +++ b/tests/scripts/task_config_build_cpu.sh @@ -38,6 +38,3 @@ echo set\(CMAKE_CXX_FLAGS -Werror\) >> config.cmake echo set\(HIDE_PRIVATE_SYMBOLS ON\) >> config.cmake 
echo set\(USE_VTA_TSIM ON\) >> config.cmake echo set\(USE_VTA_FSIM ON\) >> config.cmake -echo set\(USE_TFLITE ON\) >> config.cmake -echo set\(USE_TENSORFLOW_PATH \"/tensorflow\"\) >> config.cmake -echo set\(USE_FLATBUFFERS_PATH \"/flatbuffers\"\) >> config.cmake From abd7ffcc38c954c2874d72b6930180607152c11a Mon Sep 17 00:00:00 2001 From: Michal Piszczek Date: Fri, 24 Apr 2020 13:17:02 -0700 Subject: [PATCH 10/10] Build tflite runtime with latest stable tf --- docker/install/ubuntu_install_tflite.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/install/ubuntu_install_tflite.sh b/docker/install/ubuntu_install_tflite.sh index 3a34db98b9a7..f7ed4841b328 100755 --- a/docker/install/ubuntu_install_tflite.sh +++ b/docker/install/ubuntu_install_tflite.sh @@ -34,7 +34,7 @@ pip2 install flatbuffers # Build the TFLite static library, necessary for building with TFLite ON. # The library is built at: # tensorflow/tensorflow/lite/tools/make/gen/*/lib/libtensorflow-lite.a. -git clone https://github.com/tensorflow/tensorflow --branch=r2.0 +git clone https://github.com/tensorflow/tensorflow --branch=r2.1 ./tensorflow/tensorflow/lite/tools/make/download_dependencies.sh ./tensorflow/tensorflow/lite/tools/make/build_lib.sh