diff --git a/README.md b/README.md
index 09ceb7ab1d07..d96038d17804 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@ TVM is licensed under the [Apache-2.0](LICENSE) license.
 Getting Started
 ---------------
 Check out the [TVM Documentation](https://tvm.apache.org/docs/) site for installation instructions, tutorials, examples, and more.
-The [Getting Started with TVM](https://tvm.apache.org/docs/tutorials/get_started/introduction.html) tutorial is a great
+The [Getting Started with TVM](https://tvm.apache.org/docs/tutorial/introduction.html) tutorial is a great
 place to start.
 
 Contribute to TVM
diff --git a/gallery/tutorial/autotvm_relay_x86.py b/gallery/tutorial/autotvm_relay_x86.py
index 8b9c45c2a859..67b832cc226d 100644
--- a/gallery/tutorial/autotvm_relay_x86.py
+++ b/gallery/tutorial/autotvm_relay_x86.py
@@ -106,7 +106,7 @@
 # TVMC has adopted NumPy's ``.npz`` format for both input and output data.
 #
 # As input for this tutorial, we will use the image of a cat, but you can feel
-# free to substitute image for any of your choosing.
+# free to substitute this image for any of your choosing.
 #
 # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg
 #   :height: 224px
@@ -278,6 +278,7 @@
 from tvm.autotvm.tuner import XGBTuner
 from tvm import autotvm
 
+################################################################################
 # Set up some basic parameters for the runner. The runner takes compiled code
 # that is generated with a specific set of parameters and measures the
 # performance of it. ``number`` specifies the number of different
@@ -303,6 +304,7 @@
     enable_cpu_cache_flush=True,
 )
 
+################################################################################
 # Create a simple structure for holding tuning options. We use an XGBoost
 # algorithim for guiding the search. For a production job, you will want to set
 # the number of trials to be larger than the value of 10 used here. For CPU we
@@ -426,6 +428,7 @@
 for rank in ranks[0:5]:
     print("class='%s' with probability=%f" % (labels[rank], scores[rank]))
 
+################################################################################
 # Verifying that the predictions are the same:
 #
 # .. code-block:: bash
diff --git a/gallery/tutorial/tensor_expr_get_started.py b/gallery/tutorial/tensor_expr_get_started.py
index fda332cb63ba..e4d947d1c488 100644
--- a/gallery/tutorial/tensor_expr_get_started.py
+++ b/gallery/tutorial/tensor_expr_get_started.py
@@ -133,7 +133,7 @@
 
 ################################################################################
 # Let's run the function, and compare the output to the same computation in
-# numpy. The compiled TVM function is exposes a concise C API that can be invoked
+# numpy. The compiled TVM function exposes a concise C API that can be invoked
 # from any language. We begin by creating a device, which is a device (CPU in this
 # example) that TVM can compile the schedule to. In this case the device is an
 # LLVM CPU target. We can then initialize the tensors in our device and
@@ -258,8 +258,8 @@ def evaluate_addition(func, target, optimization, log):
 print(tvm.lower(s, [A, B, C], simple_mode=True))
 
 ################################################################################
-# Comparing the Diferent Schedules
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# Comparing the Different Schedules
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 # We can now compare the different schedules
 
 baseline = log[0][1]
@@ -347,7 +347,7 @@ def evaluate_addition(func, target, optimization, log):
     fadd = tvm.build(s, [A, B, C], target=tgt_gpu, name="myadd")
 
     ################################################################################
-    # The compiled TVM function is exposes a concise C API that can be invoked from
+    # The compiled TVM function exposes a concise C API that can be invoked from
     # any language.
     #
     # We provide a minimal array API in python to aid quick testing and prototyping.
diff --git a/gallery/tutorial/tvmc_command_line_driver.py b/gallery/tutorial/tvmc_command_line_driver.py
index 7a0b97895e4f..facb978cea67 100644
--- a/gallery/tutorial/tvmc_command_line_driver.py
+++ b/gallery/tutorial/tvmc_command_line_driver.py
@@ -174,10 +174,10 @@
 # data types. For this reason, most models require some pre and
 # post-processing, to ensure the input is valid and to interpret the output.
 # TVMC has adopted NumPy's ``.npz`` format for both input and output data. This
-# is a well-supported NumPy format to serialize multiple arrays into a file
+# is a well-supported NumPy format to serialize multiple arrays into a file.
 #
 # As input for this tutorial, we will use the image of a cat, but you can feel
-# free to substitute image for any of your choosing.
+# free to substitute this image for any of your choosing.
 #
 # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg
 #   :height: 224px
@@ -197,8 +197,8 @@
 # requirement for the script.
 #
 # .. code-block:: python
-#   :caption: preprocess.py
-#   :name: preprocess.py
+#     :caption: preprocess.py
+#     :name: preprocess.py
 #
 # #!python ./preprocess.py
 # from tvm.contrib.download import download_testdata
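For context, the AutoTVM hunks above touch the part of gallery/tutorial/autotvm_relay_x86.py that describes the measurement runner and the tuning-option structure. A minimal sketch of that setup, assuming the tvm.autotvm API the tutorial already imports (autotvm, XGBTuner); the parameter values and the records filename below are illustrative, not taken from the patch:

from tvm import autotvm

number = 10   # different configurations measured per round
repeat = 1    # repeated measurements per configuration
timeout = 10  # seconds before a stalled trial is abandoned

runner = autotvm.LocalRunner(
    number=number,
    repeat=repeat,
    timeout=timeout,
    min_repeat_ms=0,              # 0 is fine for CPU tuning; GPUs usually want a non-zero value
    enable_cpu_cache_flush=True,  # flush CPU caches between runs for more stable timings
)

tuning_option = {
    "tuner": "xgb",  # XGBoost-guided search, driven by XGBTuner
    "trials": 10,    # use a much larger value than 10 for a production tuning job
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(build_func="default"),
        runner=runner,
    ),
    "tuning_records": "autotvm-records.json",  # illustrative output path
}

The "####..." separator lines the patch adds are sphinx-gallery block separators: they make the comment blocks that follow render as prose in the built tutorial and do not change the executed code.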