diff --git a/docs/_static/css/tvm_theme.css b/docs/_static/css/tvm_theme.css
new file mode 100644
index 000000000000..5e0838abf6cb
--- /dev/null
+++ b/docs/_static/css/tvm_theme.css
@@ -0,0 +1,11 @@
+.rst-content .hidden-section {
+  display: none;
+}
+
+.rst-toc .hidden-section {
+  display: none;
+}
+
+nav .hidden-section {
+  display: inherit;
+}
diff --git a/docs/api/python/build.rst b/docs/api/python/build.rst
new file mode 100644
index 000000000000..4cf936910e1b
--- /dev/null
+++ b/docs/api/python/build.rst
@@ -0,0 +1,5 @@
+tvm.build
+---------
+.. autofunction:: tvm.lower
+
+.. autofunction:: tvm.build
diff --git a/docs/api/python/collection.rst b/docs/api/python/collection.rst
new file mode 100644
index 000000000000..1eb83861d940
--- /dev/null
+++ b/docs/api/python/collection.rst
@@ -0,0 +1,6 @@
+tvm.collections
+---------------
+The collections module contains the data structures used in the TVM DSL.
+
+.. automodule:: tvm.collections
+   :members:
diff --git a/docs/api/python/dev.rst b/docs/api/python/dev.rst
new file mode 100644
index 000000000000..11dcd9609ccb
--- /dev/null
+++ b/docs/api/python/dev.rst
@@ -0,0 +1,65 @@
+Developer API
+-------------
+This page contains modules that are used by developers of TVM.
+
+tvm.node
+~~~~~~~~
+Node is the base class of all TVM AST nodes. Normally users do not
+need to touch this API.
+
+.. autoclass:: tvm.node.NodeBase
+   :members:
+
+.. autoclass:: tvm.node.Node
+   :members:
+
+.. autofunction:: tvm.register_node
+
+tvm.expr
+~~~~~~~~
+.. automodule:: tvm.expr
+   :members:
+   :undoc-members:
+
+tvm.codegen
+~~~~~~~~~~~
+.. automodule:: tvm.codegen
+   :members:
+   :undoc-members:
+
+tvm.stmt
+~~~~~~~~
+.. automodule:: tvm.stmt
+   :members:
+   :undoc-members:
+
+tvm.ir_pass
+~~~~~~~~~~~
+.. automodule:: tvm.ir_pass
+   :members:
+
+.. autosummary::
+
+   tvm.ir_pass.Inline
+   tvm.ir_pass.Simplify
+   tvm.ir_pass.ConvertSSA
+   tvm.ir_pass.VerifySSA
+   tvm.ir_pass.CanonicalSimplify
+   tvm.ir_pass.StorageFlatten
+   tvm.ir_pass.VectorizeLoop
+   tvm.ir_pass.UnrollLoop
+   tvm.ir_pass.StorageSync
+   tvm.ir_pass.MakeAPI
+   tvm.ir_pass.SplitHostDevice
+   tvm.ir_pass.InjectVirtualThread
+   tvm.ir_pass.LoopPartition
+   tvm.ir_pass.RemoveNoOp
+   tvm.ir_pass.SplitPipeline
+   tvm.ir_pass.LowerThreadAllreduce
+   tvm.ir_pass.NarrowChannelAccess
+
+
+tvm.make
+~~~~~~~~
+.. automodule:: tvm.make
+   :members:
diff --git a/docs/api/python/function.rst b/docs/api/python/function.rst
new file mode 100644
index 000000000000..50a79729bc9a
--- /dev/null
+++ b/docs/api/python/function.rst
@@ -0,0 +1,7 @@
+tvm.Function
+------------
+.. autoclass:: tvm.Function
+
+.. autofunction:: tvm.register_func
+
+.. autofunction:: tvm.get_global_func
diff --git a/docs/api/python/index.rst b/docs/api/python/index.rst
new file mode 100644
index 000000000000..c359f9570d70
--- /dev/null
+++ b/docs/api/python/index.rst
@@ -0,0 +1,16 @@
+Python API
+==========
+
+.. toctree::
+   :maxdepth: 2
+
+   tvm
+   tensor
+   schedule
+   build
+   module
+   ndarray
+   collection
+   node
+   function
+   dev
diff --git a/docs/api/python/module.rst b/docs/api/python/module.rst
new file mode 100644
index 000000000000..112a92e367ab
--- /dev/null
+++ b/docs/api/python/module.rst
@@ -0,0 +1,9 @@
+tvm.module
+----------
+.. autoclass:: tvm.module.Module
+   :members:
+   :inherited-members:
+
+.. autofunction:: tvm.module.load
+
+.. autofunction:: tvm.module.enabled
diff --git a/docs/api/python/ndarray.rst b/docs/api/python/ndarray.rst
new file mode 100644
index 000000000000..f636860946ba
--- /dev/null
+++ b/docs/api/python/ndarray.rst
@@ -0,0 +1,16 @@
+tvm.ndarray
+-----------
+tvm.ndarray provides a minimal runtime array API for testing
+the correctness of programs.
+
+.. autoclass:: tvm.ndarray.TVMContext
+   :members:
+
+.. autoclass:: tvm.ndarray.NDArray
+   :members:
+   :inherited-members:
+
+.. autofunction:: tvm.cpu
+.. autofunction:: tvm.gpu
+.. autofunction:: tvm.opencl
+.. autofunction:: tvm.ndarray.array
diff --git a/docs/api/python/schedule.rst b/docs/api/python/schedule.rst
new file mode 100644
index 000000000000..4596ba62b997
--- /dev/null
+++ b/docs/api/python/schedule.rst
@@ -0,0 +1,17 @@
+tvm.schedule
+------------
+The `tvm.schedule` module contains the scheduling
+structures of TVM.
+
+.. autoclass:: tvm.schedule.IterVar
+   :members:
+
+.. autofunction:: tvm.create_schedule
+
+.. autoclass:: tvm.schedule.Schedule
+   :members:
+   :inherited-members:
+
+.. autoclass:: tvm.schedule.Stage
+   :members:
+   :inherited-members:
diff --git a/docs/api/python/tensor.rst b/docs/api/python/tensor.rst
new file mode 100644
index 000000000000..b7c2cd10baba
--- /dev/null
+++ b/docs/api/python/tensor.rst
@@ -0,0 +1,28 @@
+tvm.tensor
+----------
+The `tvm.tensor` module contains the Tensor and Operation
+classes used to declare computation.
+
+.. autoclass:: tvm.tensor.Tensor
+   :members:
+   :inherited-members:
+
+.. autoclass:: tvm.tensor.Operation
+   :members:
+   :inherited-members:
+
+.. autoclass:: tvm.tensor.ComputeOp
+   :members:
+   :show-inheritance:
+
+.. autoclass:: tvm.tensor.PlaceholderOp
+   :members:
+   :show-inheritance:
+
+.. autoclass:: tvm.tensor.ScanOp
+   :members:
+   :show-inheritance:
+
+.. autoclass:: tvm.tensor.ExternOp
+   :members:
+   :show-inheritance:
diff --git a/docs/api/python/tvm.rst b/docs/api/python/tvm.rst
new file mode 100644
index 000000000000..b16911aa18de
--- /dev/null
+++ b/docs/api/python/tvm.rst
@@ -0,0 +1,40 @@
+tvm
+---
+tvm is the library root namespace; it contains functions
+for declaring computation.
+
+.. autosummary::
+
+   tvm.load_json
+   tvm.save_json
+   tvm.var
+   tvm.const
+   tvm.convert
+   tvm.placeholder
+   tvm.compute
+   tvm.scan
+   tvm.extern
+   tvm.call_packed
+   tvm.decl_buffer
+   tvm.reduce_axis
+   tvm.thread_axis
+   tvm.sum
+   tvm.min
+   tvm.max
+
+.. autofunction:: tvm.load_json
+.. autofunction:: tvm.save_json
+.. autofunction:: tvm.var
+.. autofunction:: tvm.const
+.. autofunction:: tvm.convert
+.. autofunction:: tvm.placeholder
+.. autofunction:: tvm.compute
+.. autofunction:: tvm.scan
+.. autofunction:: tvm.extern
+.. autofunction:: tvm.call_packed
+.. autofunction:: tvm.decl_buffer
+.. autofunction:: tvm.reduce_axis
+.. autofunction:: tvm.thread_axis
+.. autofunction:: tvm.sum
+.. autofunction:: tvm.min
+.. autofunction:: tvm.max
diff --git a/docs/conf.py b/docs/conf.py
index e887163f3e42..e97033d4960c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -26,7 +26,6 @@
 libpath = os.path.join(curr_path, '../python/')
 sys.path.insert(0, libpath)
-
 # -- General configuration ------------------------------------------------
 
 # General information about the project.
@@ -51,11 +50,15 @@
 extensions = [
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
+    'sphinx.ext.intersphinx',
     'sphinx.ext.napoleon',
     'sphinx.ext.mathjax',
     'sphinx_gallery.gen_gallery',
 ]
+
+breathe_projects = {'tvm' : 'doxygen/xml/'}
+breathe_default_project = 'tvm'
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -132,7 +135,7 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
+html_static_path = ['_static']
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = project + 'doc'
@@ -153,12 +156,19 @@
 def run_doxygen(folder):
     """Run the doxygen make command in the designated folder."""
     try:
-        retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
+        retcode = subprocess.call("cd %s; make doc" % folder, shell=True)
         if retcode < 0:
             sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
     except OSError as e:
         sys.stderr.write("doxygen execution failed: %s" % e)
 
+intersphinx_mapping = {
+    'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
+    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
+    'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
+    'matplotlib': ('http://matplotlib.org/', None),
+}
+
 examples_dirs = ['../tutorials/python']
 gallery_dirs = ['tutorials']
@@ -171,18 +181,24 @@ def generate_doxygen_xml(app):
 
 def setup(app):
     # Add hook for building doxygen xml when needed
-    # no c++ API for now
-    # app.connect("builder-inited", generate_doxygen_xml)
+    app.connect("builder-inited", generate_doxygen_xml)
+    app.add_stylesheet('css/tvm_theme.css')
     app.add_config_value('recommonmark_config', {
         'url_resolver': lambda url: github_doc_root + url,
     }, True)
     app.add_transform(AutoStructify)
+
 
 sphinx_gallery_conf = {
     'backreferences_dir': 'gen_modules/backreferences',
-    'doc_module': ('tvm'),
-    'reference_url': {
-        'tvm': None
-    },
+    'doc_module': ('tvm', 'numpy'),
+    'reference_url': {
+        'tvm': None,
+        'matplotlib': 'http://matplotlib.org',
+        'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
     'examples_dirs': examples_dirs,
-    'gallery_dirs': gallery_dirs
+    'gallery_dirs': gallery_dirs,
+    'find_mayavi_figures': False,
+    'filename_pattern': '.py',
+    'expected_failing_examples': []
 }
diff --git a/docs/index.rst b/docs/index.rst
index e025888d0f53..f265044b832b 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -9,7 +9,7 @@ Contents
 .. toctree::
    :maxdepth: 1
 
-   how_to/contribute
    how_to/install
    tutorials/index
-   python/api
+   api/python/index
+   how_to/contribute
diff --git a/docs/python/api.rst b/docs/python/api.rst
deleted file mode 100644
index 017a6fc3bf92..000000000000
--- a/docs/python/api.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-Python API
-==========
-
-tvm
----
-tvm is a library root namespace contains functions for
-declaring computation.
-
-.. autofunction:: tvm.load_json
-
-.. autofunction:: tvm.save_json
-
-.. autofunction:: tvm.var
-
-.. autofunction:: tvm.convert
-
-.. autofunction:: tvm.placeholder
-
-.. autofunction:: tvm.compute
-
-.. autofunction:: tvm.scan
-
-.. autofunction:: tvm.extern
-
-.. autofunction:: tvm.reduce_axis
-
-.. autofunction:: tvm.sum
-
-tvm.tensor
-----------
-The `tvm.tensor` module contains declaration of Tensor
-and Operation class for computation declaration.
-
-.. autoclass:: tvm.tensor.Tensor
-    :members:
-    :inherited-members:
-
-.. autoclass:: tvm.tensor.Operation
-    :members:
-    :inherited-members:
-
-tvm.schedule
-------------
-.. autofunction:: tvm.create_schedule
-
-.. autoclass:: tvm.schedule.Schedule
-    :members:
-
-.. autoclass:: tvm.schedule.Stage
-    :members:
-
-tvm.build
----------
-
-.. autofunction:: tvm.lower
-.. autofunction:: tvm.build
-
-tvm.ndarray
------------
-tvm.ndarray provides a minimum runtime array API to testing out
-the correctness of the program.
-
-.. autofunction:: tvm.cpu
-.. autofunction:: tvm.gpu
-.. autofunction:: tvm.vpi
-.. autofunction:: tvm.opencl
-.. autofunction:: tvm.ndarray.array
-
-.. autoclass:: tvm.ndarray.TVMContext
-    :members:
-
-.. autoclass:: tvm.ndarray.NDArray
-    :members:
-    :inherited-members:
-
-tvm.Function
-------------
-
-.. autofunction:: tvm.register_func
-
-.. autoclass:: tvm.Function
-
-tvm.module
-----------
-.. autofunction:: tvm.module.load
-
-.. autofunction:: tvm.module.load
-
-.. autoclass:: tvm.module.Module
-    :members:
-    :inherited-members:
-
-tvm.node
---------
-tvm.node provides
-
-.. autofunction:: tvm.register_node
-
-.. autoclass:: tvm.node.NodeBase
-    :members:
-
-.. autoclass:: tvm.node.Node
-    :members:
-
-tvm.expr
---------
-.. automodule:: tvm.expr
-    :members:
diff --git a/python/tvm/_ctypes/_function.py b/python/tvm/_ctypes/_function.py
index d5898be550c0..108136802ce9 100644
--- a/python/tvm/_ctypes/_function.py
+++ b/python/tvm/_ctypes/_function.py
@@ -136,7 +136,27 @@ def _make_tvm_args(args, temp_args):
 
 
 class Function(object):
-    """The Function object used in TVM.
+    """The PackedFunc object used in TVM.
+
+    Function plays a key role in bridging the frontend and backend of TVM.
+    Function provides a type-erased interface: you can call a Function with
+    positional arguments.
+
+    Compiled modules return Functions.
+    The TVM backend also registers and exposes its API as Functions.
+    For example, the developer functions exposed in tvm.ir_pass are actually
+    C++ functions that are registered as PackedFuncs.
+
+    Common usage scenarios of tvm.Function include:
+
+    - Automatic exposure of C++ APIs into python
+    - Calling PackedFuncs from the python side
+    - Calling python callbacks to inspect results in generated code
+    - Bringing python hooks into the C++ backend
+
+    See Also
+    --------
+    tvm.register_func: How to register a global function.
+    tvm.get_global_func: How to get a global function.
     """
     __slots__ = ["handle", "is_global"]
     # pylint: disable=no-member
@@ -299,6 +319,26 @@ def register_func(func_name, f=None):
     -------
     fregister : function
         Register function if f is not specified.
+
+    Examples
+    --------
+    The following code registers my_packed_func as a global function.
+    Note that we simply get it back from the global function table to invoke
+    it from the python side. However, we can also invoke the same function
+    from the C++ backend, or in compiled TVM code.
+
+    .. code-block:: python
+
+      targs = (10, 10.0, "hello")
+      @tvm.register_func
+      def my_packed_func(*args):
+          assert(tuple(args) == targs)
+          return 10
+      # Get it out from the global function table
+      f = tvm.get_global_func("my_packed_func")
+      assert isinstance(f, tvm.Function)
+      y = f(*targs)
+      assert y == 10
     """
     if callable(func_name):
         f = func_name
@@ -328,7 +368,7 @@ def get_global_func(name):
 
     Returns
     -------
-    func : tvm.nd.Function
+    func : tvm.Function
         The function to be returned.
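+
+    Examples
+    --------
+    A minimal sketch mirroring the example in :any:`tvm.register_func`:
+    register a python function globally, then fetch it back by name.
+
+    .. code-block:: python
+
+      @tvm.register_func
+      def my_packed_func(*args):
+          return 10
+      # fetch the registered function back from the global table
+      f = tvm.get_global_func("my_packed_func")
+      assert f() == 10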
""" handle = FunctionHandle() @@ -355,27 +395,60 @@ def list_global_func_names(): return fnames -def _init_api_functions(root_namespace): - """List and add all the functions to current module.""" - module_obj = sys.modules["%s.api" % root_namespace] - module_internal = sys.modules["%s._api_internal" % root_namespace] +def _get_api(f): + flocal = f + def my_api_func(*args): + """ + + This is a type erased API that calls into Global PackedFunc. + These APIs corresponds to functions registered from C++ backend + and can be used as developer functions. + + args : list + The positional arguments to the function call. + + Returns + ------- + value : int, float, None, Node or Function + The result of the API function call. + """ + return flocal(*args) + return my_api_func + +def _init_api(mod): + """Initialize api for a given module name + + mod : str + The name of the module. + """ + module = sys.modules[mod] namespace_match = { - "_make_": sys.modules["%s.make" % root_namespace], - "_arith_": sys.modules["%s.arith" % root_namespace], - "_pass_": sys.modules["%s.ir_pass" % root_namespace], - "_codegen_": sys.modules["%s.codegen" % root_namespace], - "_module_": sys.modules["%s.module" % root_namespace], - "_schedule_": sys.modules["%s.schedule" % root_namespace] + "_make_": "tvm.make", + "_arith_": "tvm.arith", + "_pass_": "tvm.ir_pass", + "_codegen_": "tvm.codegen", + "_module_": "tvm.module", + "_schedule_": "tvm.schedule" } for name in list_global_func_names(): fname = name - target_module = module_internal if name.startswith('_') else module_obj + target = "tvm.api" for k, v in namespace_match.items(): if name.startswith(k): fname = name[len(k):] - target_module = v + target = v + if target != mod: + continue + if mod == "tvm.api" and name.startswith("_"): + target_module = sys.modules["tvm._api_internal"] + else: + target_module = module + f = get_global_func(name) - setattr(target_module, fname, f) + ff = _get_api(f) + ff.__name__ = fname + ff.__doc__ = ("TVM PackedFunc %s. " % fname) + setattr(target_module, ff.__name__, ff) def _init_module_module(module_class): diff --git a/python/tvm/_ctypes/_node.py b/python/tvm/_ctypes/_node.py index 6f8d8905e1c5..29c4c1224a34 100644 --- a/python/tvm/_ctypes/_node.py +++ b/python/tvm/_ctypes/_node.py @@ -186,13 +186,14 @@ def register_node(type_key=None): type_key : str or cls The type key of the node """ + node_name = type_key if isinstance(type_key, str) else type_key.__name__ + + def register(cls): + """internal register function""" + NODE_TYPE[node_name] = cls + return cls + if isinstance(type_key, str): - def register(cls): - """internal register function""" - NODE_TYPE[type_key] = cls - return cls return register else: - cls = type_key - NODE_TYPE[cls.__name__] = cls - return cls + return register(type_key) diff --git a/python/tvm/api.py b/python/tvm/api.py index fffdd20bf86f..1f697558917b 100644 --- a/python/tvm/api.py +++ b/python/tvm/api.py @@ -8,13 +8,14 @@ from ._ctypes._node import register_node, NodeBase from ._ctypes._node import convert_to_node as _convert_to_node from ._ctypes._function import Function -from ._ctypes._function import _init_api_functions, register_func, get_global_func +from ._ctypes._function import _init_api, register_func, get_global_func from ._ctypes._function import convert_to_tvm_func as _convert_tvm_func from . import _api_internal from . import _base from . import make as _make from . import expr as _expr from . import tensor as _tensor +from . import schedule as _schedule from . 
+    """
+    if isinstance(value, (Function, NodeBase)):
+        return value
+
+    if callable(value):
+        return _convert_tvm_func(value)
+    else:
+        return _convert_to_node(value)
+
+
 def load_json(json_str):
     """Load tvm object from json_str.
@@ -179,8 +201,8 @@ def scan(init, update, state_placeholder, inputs=None, name="scan"):
     .. code-block:: python
 
        # The following code is equivalent to numpy.cumsum
-       m = tvm.Var("m")
-       n = tvm.Var("n")
+       m = tvm.var("m")
+       n = tvm.var("n")
        X = tvm.placeholder((m, n), name="X")
        s_state = tvm.placeholder((m, n))
        s_init = tvm.compute((1, n), lambda _, i: X[0, i])
@@ -281,7 +303,10 @@ def decl_buffer(shape, dtype=None,
                 strides=None,
                 byte_offset=None,
                 offset_alignment=0):
-    """Decleare a new symbolic buffer
+    """Declare a new symbolic buffer.
+
+    Normally buffers are created automatically during lower and build.
+    This is only needed if users want to specify their own buffer layout.
 
     Parameters
     ----------
@@ -370,6 +395,11 @@ def thread_axis(dom=None, tag='', name=''):
 
     name : str, optional
         The name of the var.
+
+    Returns
+    -------
+    axis : IterVar
+        The thread itervar.
     """
     if isinstance(dom, _base.string_types):
         tag, dom = dom, None
@@ -446,7 +476,7 @@ def min(lhs, rhs=None, axis=None, where=None):
     """
     if rhs and axis:
         raise ValueError("Can only take one argument, rhs or axis")
-    if isinstance(rhs, (_collections.IterVar, list)):
+    if isinstance(rhs, (_schedule.IterVar, list)):
         axis, rhs = rhs, axis
     if rhs:
         return _make.Min(lhs, rhs)
@@ -479,7 +509,7 @@ def max(lhs, rhs=None, axis=None, where=None):
     """
     if rhs and axis:
         raise ValueError("Can only take one argument, rhs or axis")
-    if isinstance(rhs, (_collections.IterVar, list)):
+    if isinstance(rhs, (_schedule.IterVar, list)):
         axis, rhs = rhs, axis
     if rhs:
         return _make.Max(lhs, rhs)
@@ -487,26 +517,4 @@
         x = _make.Reduce("Max", expr, axis, where)
     return x
 
-
-def convert(value):
-    """Convert value to TVM node or function.
-
-    Parameters
-    ----------
-    value : python value
-
-    Returns
-    -------
-    tvm_val : Node or Function
-        Converted value in TVM
-    """
-    if isinstance(value, (Function, NodeBase)):
-        return value
-
-    if callable(value):
-        return _convert_tvm_func(value)
-    else:
-        return _convert_to_node(value)
-
-
-_init_api_functions("tvm")
+_init_api("tvm.api")
diff --git a/python/tvm/arith.py b/python/tvm/arith.py
index a8886fcf2150..ed086bfdebe5 100644
--- a/python/tvm/arith.py
+++ b/python/tvm/arith.py
@@ -2,6 +2,7 @@
 from __future__ import absolute_import as _abs
 
 from ._ctypes._node import NodeBase, register_node
+from ._ctypes._function import _init_api
 from . import _api_internal
 
 class IntSet(NodeBase):
@@ -36,3 +37,5 @@ class StrideSet(IntSet):
 class ModularSet(IntSet):
     """Represent range of (coeff * x + base) for x in Z """
     pass
+
+_init_api("tvm.arith")
diff --git a/python/tvm/build.py b/python/tvm/build.py
index 5f3f3298429c..2bd99a185d8f 100644
--- a/python/tvm/build.py
+++ b/python/tvm/build.py
@@ -134,10 +134,10 @@ def build(sch,
     fapi = ir_pass.LowerThreadAllreduce(fapi, warp_size)
     fsplits = [s for s in ir_pass.SplitHostDevice(fapi)]
     if len(fsplits) > 1:
-        mhost = codegen.build(fsplits[0], target_host)
+        mhost = codegen.build_module(fsplits[0], target_host)
         if target:
-            mdev = codegen.build(fsplits[1:], target)
+            mdev = codegen.build_module(fsplits[1:], target)
             mhost.import_module(mdev)
         return mhost
     else:
-        return codegen.build(fsplits[0], target)
+        return codegen.build_module(fsplits[0], target)
diff --git a/python/tvm/codegen.py b/python/tvm/codegen.py
index 02dda155c19a..fbb102d464e8 100644
--- a/python/tvm/codegen.py
+++ b/python/tvm/codegen.py
@@ -1 +1,39 @@
-"""Code generation related functions"""
+"""Code generation related functions."""
+from ._ctypes._function import _init_api
+
+def build_module(lowered_func, target):
+    """Build lowered_func into a Module.
+
+    Parameters
+    ----------
+    lowered_func : LoweredFunc
+        The lowered function
+
+    target : str
+        The target module type.
+
+    Returns
+    -------
+    module : Module
+        The corresponding module.
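+
+    Examples
+    --------
+    A small sketch following the pattern in the updated unit tests:
+    guard on :any:`tvm.codegen.enabled` before building a lowered
+    function (here fapi is a LoweredFunc, e.g. from tvm.ir_pass.MakeAPI).
+
+    .. code-block:: python
+
+      if tvm.codegen.enabled("llvm"):
+          m = tvm.codegen.build_module(fapi, "llvm")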
+    """
+    return _Build(lowered_func, target)
+
+
+def enabled(target):
+    """Whether target is enabled for codegen.
+
+    Parameters
+    ----------
+    target : str
+        The target module type.
+
+    Returns
+    -------
+    enabled : bool
+        Whether the target module is enabled.
+    """
+    return _Enabled(target)
+
+
+_init_api("tvm.codegen")
diff --git a/python/tvm/collections.py b/python/tvm/collections.py
index 40ea219faa46..c2af29661702 100644
--- a/python/tvm/collections.py
+++ b/python/tvm/collections.py
@@ -2,11 +2,16 @@
 from __future__ import absolute_import as _abs
 
 from ._ctypes._node import NodeBase, register_node
 from . import _api_internal
-from . import expr as _expr
 
 @register_node
 class Array(NodeBase):
-    """Array container of TVM"""
+    """Array container of TVM.
+
+    You do not need to create an Array explicitly.
+    Normally a python list or tuple is converted automatically
+    to an Array during a tvm function call.
+    You may get an Array back in the return values of a TVM function call.
+    """
     def __getitem__(self, i):
         if isinstance(i, slice):
             start = i.start if i.start is not None else 0
@@ -26,7 +31,13 @@ def __repr__(self):
 
 @register_node
 class Map(NodeBase):
-    """Map container of TVM"""
+    """Map container of TVM.
+
+    You do not need to create a Map explicitly.
+    Normally a python dict is converted automatically
+    to a Map during a tvm function call.
+    You may get a Map back in the return values of a TVM function call.
+    """
     def __getitem__(self, k):
         return _api_internal._MapGetItem(self, k)
@@ -47,22 +58,12 @@ def __repr__(self):
 
 @register_node
 class Range(NodeBase):
-    """Represent range in TVM"""
-    pass
-
-
-@register_node
-class IterVar(NodeBase, _expr.ExprOp):
-    """Represent iteration variable."""
-    DataPar = 0
-    ThreadIndex = 1
-    CommReduce = 2
-    Ordered = 3
-    DimInfo = 4
-    Unrolled = 5
-    Vectorized = 6
-    Parallelized = 7
+    """Represent a range in TVM.
+
+    You do not need to create a Range explicitly.
+    Python lists and tuples are converted automatically
+    to Range in api functions.
+    """
+    pass
 
 
 @register_node
 class LoweredFunc(NodeBase):
diff --git a/python/tvm/expr.py b/python/tvm/expr.py
index a9e9750e8e42..28aa4c92079a 100644
--- a/python/tvm/expr.py
+++ b/python/tvm/expr.py
@@ -1,4 +1,19 @@
-"""Module to declare Expression class"""
+"""Expression AST nodes in TVM.
+
+Users do not need to deal with expression AST nodes directly,
+but they can be helpful for developers doing quick prototyping.
+Although not displayed in the document and python file,
+each expression node has subfields that can be visited from the python side.
+
+For example, you can use addexp.a to get the left operand of an Add node.
+
+.. code-block:: python
+
+  x = tvm.var("n")
+  y = x + 2
+  assert(isinstance(y, tvm.expr.Add))
+  assert(y.a == x)
+"""
 # pylint: disable=missing-docstring
 from __future__ import absolute_import as _abs
 
 from ._ctypes._node import NodeBase, register_node
@@ -75,7 +90,7 @@ class LogicalExpr(Expr):
 
 @register_node("Variable")
 class Var(Expr):
-    """Symbolic variable expression."""
+    """Symbolic variable."""
     pass
 
 @register_node
diff --git a/python/tvm/ir_pass.py b/python/tvm/ir_pass.py
index 3ba8c219a5f9..b43a69700010 100644
--- a/python/tvm/ir_pass.py
+++ b/python/tvm/ir_pass.py
@@ -1 +1,11 @@
-"""Namespace of IR pass functions"""
+"""Namespace of IR pass functions.
+
+This namespace is used by developers. Although you do not see any
+declarations here, the functions are automatically exported from the
+C++ side via PackedFunc.
+
+Each api is a PackedFunc that can be called in a positional argument manner.
+You can read "include/tvm/pass.h" for the signatures of these functions.
+"""
+from ._ctypes._function import _init_api
+
+_init_api("tvm.ir_pass")
diff --git a/python/tvm/make.py b/python/tvm/make.py
index e874a45b9a2a..50613701cc23 100644
--- a/python/tvm/make.py
+++ b/python/tvm/make.py
@@ -1 +1,11 @@
-"""namespace of IR node builder make function"""
+"""Namespace of IR node builder make functions.
+
+This namespace is used by developers. Although you do not see any
+declarations here, the functions are automatically exported from the
+C++ side via PackedFunc.
+
+Each api is a PackedFunc that can be called in a positional argument manner.
+You can use the make functions to build IR nodes.
+"""
+from ._ctypes._function import _init_api
+
+_init_api("tvm.make")
diff --git a/python/tvm/module.py b/python/tvm/module.py
index d4c90c6df472..6c9c5f1a365a 100644
--- a/python/tvm/module.py
+++ b/python/tvm/module.py
@@ -1,6 +1,8 @@
 """Runtime module related stuffs"""
 from __future__ import absolute_import as _abs
 from ._ctypes._function import ModuleBase, _init_module_module
+from ._ctypes._function import _init_api
+
 
 class Module(ModuleBase):
     """Module container of all TVM generated functions"""
@@ -28,7 +30,7 @@ def imported_modules(self):
 
         Returns
         ----------
-        modules : list of Modules
+        modules : list of Module
            The module
         """
         nmod = _ImportsSize(self)
@@ -58,7 +60,36 @@ def load(path, fmt=""):
     fmt : str, optional
         The format of the file, if not specified
         it will be inferred from suffix of the file.
+
+    Returns
+    -------
+    module : Module
+        The loaded module
     """
     return _LoadFromFile(path, fmt)
 
+
+def enabled(target):
+    """Whether module runtime is enabled for target.
+
+    Parameters
+    ----------
+    target : str
+        The target device type.
+
+    Returns
+    -------
+    enabled : bool
+        Whether runtime is enabled.
+
+    Examples
+    --------
+    The following code checks if the gpu runtime is enabled.
+
+    >>> tvm.module.enabled("gpu")
+    """
+    return _Enabled(target)
+
+
+_init_api("tvm.module")
 _init_module_module(Module)
diff --git a/python/tvm/schedule.py b/python/tvm/schedule.py
index 4d22c7b40023..1713b5aa85f2 100644
--- a/python/tvm/schedule.py
+++ b/python/tvm/schedule.py
@@ -3,11 +3,14 @@
 from ._ctypes._node import NodeBase, register_node
 from . import _api_internal
 from . import tensor as _tensor
+from . import expr as _expr
 from . import collections as _collections
+from ._ctypes._function import _init_api
+
 
 @register_node
 class Buffer(NodeBase):
-    """Represent a Buffer in TVM."""
+    """Represent a symbolic buffer in TVM."""
    pass
 
 @register_node
@@ -20,6 +23,29 @@ class Fuse(NodeBase):
     """Fuse operation on axis."""
     pass
 
+@register_node
+class IterVar(NodeBase, _expr.ExprOp):
+    """Represent an iteration variable.
+
+    IterVar is normally created by Operation, to represent
+    axis iterations in the computation.
+    It can also be created by schedule primitives like
+    :any:`tvm.schedule.Stage.split`.
+
+    See Also
+    --------
+    tvm.thread_axis: Create a thread axis IterVar.
+    tvm.reduce_axis: Create a reduce axis IterVar.
+    """
+    DataPar = 0
+    ThreadIndex = 1
+    CommReduce = 2
+    Ordered = 3
+    DimInfo = 4
+    Unrolled = 5
+    Vectorized = 6
+    Parallelized = 7
+
+_tensor.iter_var_cls = IterVar
 
 def create_schedule(ops):
     """Create a schedule for list of ops
@@ -343,3 +369,5 @@ def parallel(self, var):
         The iteration to be parallelized.
     """
     _api_internal._StageParallel(self, var)
+
+_init_api("tvm.schedule")
diff --git a/python/tvm/stmt.py b/python/tvm/stmt.py
index 89c923404726..186f4cb31c1c 100644
--- a/python/tvm/stmt.py
+++ b/python/tvm/stmt.py
@@ -1,4 +1,18 @@
-"""Statement classes"""
+"""Statement AST nodes in TVM.
+
+Users do not need to deal with AST nodes directly,
+but they can be helpful for developers doing quick prototyping.
+Although not displayed in the document and python file,
+each statement node has subfields that can be visited from the python side.
+
+.. code-block:: python
+
+  x = tvm.var("n")
+  a = tvm.var("array", tvm.handle)
+  st = tvm.make.Store(a, x + 1, 1)
+  assert isinstance(st, tvm.stmt.Store)
+  assert(st.buffer_var == a)
+"""
 from __future__ import absolute_import as _abs
 from ._ctypes._node import NodeBase, register_node
diff --git a/python/tvm/tensor.py b/python/tvm/tensor.py
index 9c42332b1791..a663490487f1 100644
--- a/python/tvm/tensor.py
+++ b/python/tvm/tensor.py
@@ -1,7 +1,7 @@
-"""Tensor related abstractions"""
+"""Tensor and Computation abstraction objects"""
+# pylint: disable=invalid-name
 from __future__ import absolute_import as _abs
 from ._ctypes._node import NodeBase, SliceBase, register_node, convert_to_node
-from . import collections as _collections
 from . import _api_internal
 from . import make as _make
 from . import expr as _expr
@@ -19,6 +19,7 @@ def __getitem__(self, indices):
             indices = (indices,)
         return TensorSlice(self.tensor, self.indices + indices)
 
+iter_var_cls = None
 
 @register_node
 class Tensor(NodeBase):
@@ -30,10 +31,10 @@ def __call__(self, *indices):
         indices = convert_to_node(indices)
         args = []
         for x in indices:
-            if isinstance(x, _collections.IterVar):
-                args.append(x.var)
-            elif isinstance(x, _expr.Expr):
+            if isinstance(x, _expr.Expr):
                 args.append(x)
+            elif isinstance(x, iter_var_cls):
+                args.append(x.var)
             else:
                 raise ValueError("The indices must be expression")
 
@@ -95,20 +96,35 @@ def output(self, index):
         """
         return _api_internal._OpGetOutput(self, index)
 
+
 @register_node
 class PlaceholderOp(Operation):
     """Placeholder operation."""
     pass
 
+
 @register_node
 class ComputeOp(Operation):
     """Compute operation."""
-    pass
+    @property
+    def axis(self):
+        """Represent the axis IterVars; only defined for ComputeOp."""
+        return self.__getattr__("axis")
+
+    @property
+    def reduce_axis(self):
+        """Represent the reduction axes; only defined for ComputeOp."""
+        return self.__getattr__("reduce_axis")
+
 
 @register_node
 class ScanOp(Operation):
     """Scan operation."""
-    pass
+    @property
+    def scan_axis(self):
+        """Represent the scan axis; only defined for ScanOp."""
+        return self.__getattr__("scan_axis")
+
 
 @register_node
 class ExternOp(Operation):
diff --git a/src/api/api_codegen.cc b/src/api/api_codegen.cc
index 9616dccb306a..26f87b352be6 100644
--- a/src/api/api_codegen.cc
+++ b/src/api/api_codegen.cc
@@ -12,7 +12,7 @@
 namespace tvm {
 namespace codegen {
 
-TVM_REGISTER_API(_codegen_build)
+TVM_REGISTER_API(_codegen__Build)
 .set_body([](TVMArgs args, TVMRetValue *ret) {
     if (args[0].IsNodeType()) {
       *ret = Build({args[0]}, args[1]);
@@ -21,7 +21,7 @@ TVM_REGISTER_API(_codegen__Build)
     }
   });
 
-TVM_REGISTER_API(_codegen_enabled)
+TVM_REGISTER_API(_codegen__Enabled)
 .set_body([](TVMArgs args, TVMRetValue *ret) {
     *ret = TargetEnabled(args[0]);
   });
diff --git a/src/runtime/module.cc b/src/runtime/module.cc
index 112dca59f50d..6529bb56db5e 100644
--- a/src/runtime/module.cc
+++ b/src/runtime/module.cc
@@ -97,7 +97,7 @@ bool RuntimeEnabled(const std::string& target) {
   return runtime::Registry::Get(load_f_name) != nullptr;
 }
 
-TVM_REGISTER_GLOBAL(_module_enabled)
+TVM_REGISTER_GLOBAL(_module__Enabled)
 .set_body([](TVMArgs args, TVMRetValue *ret) {
     *ret = RuntimeEnabled(args[0]);
   });
diff --git a/tests/python/integration/test_dot.py b/tests/python/integration/test_dot.py
index d74deae1ab7c..2aa3d0ddfe62 100644
--- a/tests/python/integration/test_dot.py
+++ b/tests/python/integration/test_dot.py
@@ -38,7 +38,7 @@ def verify(target):
     if not tvm.codegen.enabled(target):
         print("Target %s is not enabled" % target)
         return
-    f = tvm.codegen.build(fapi, target)
+    f = tvm.codegen.build_module(fapi, target)
     # verify
     ctx = tvm.cpu(0)
     a = tvm.nd.array(np.random.uniform(size=(nn,)).astype(A.dtype), ctx)
diff --git a/tests/python/unittest/test_codegen_device.py b/tests/python/unittest/test_codegen_device.py
index 8ff55acf4e94..90c28a8af363 100644
--- a/tests/python/unittest/test_codegen_device.py
+++ b/tests/python/unittest/test_codegen_device.py
@@ -32,8 +32,8 @@ def check_target(device, host="stackvm"):
     if not tvm.codegen.enabled(device):
         return
     ctx = tvm.gpu(0) if device == "cuda" else tvm.cl(0)
-    mhost = tvm.codegen.build(fsplits[0], host)
-    mdev = tvm.codegen.build(fsplits[1:], device)
+    mhost = tvm.codegen.build_module(fsplits[0], host)
+    mdev = tvm.codegen.build_module(fsplits[1:], device)
     mhost.import_module(mdev)
     code = mdev.get_source()
     f = mhost.entry_func
@@ -54,8 +54,8 @@ def check_module_save(device, host="stackvm"):
         return
     ctx = tvm.gpu(0) if device == "cuda" else tvm.cl(0)
     fmt = "ptx" if device == "cuda" else "cl"
-    mhost = tvm.codegen.build(fsplits[0], host)
-    mdev = tvm.codegen.build(fsplits[1:], device)
+    mhost = tvm.codegen.build_module(fsplits[0], host)
+    mdev = tvm.codegen.build_module(fsplits[1:], device)
     temp = testing.tempdir()
     mpath = temp.relpath("test.%s" % fmt)
     mdev.save(mpath)
diff --git a/tests/python/unittest/test_codegen_vm_basic.py b/tests/python/unittest/test_codegen_vm_basic.py
index e41656e53377..b9d11c3a02c0 100644
--- a/tests/python/unittest/test_codegen_vm_basic.py
+++ b/tests/python/unittest/test_codegen_vm_basic.py
@@ -5,7 +5,7 @@ def run_jit(fapi, check):
     for target in ["llvm", "stackvm"]:
         if not tvm.codegen.enabled(target):
             continue
-        f = tvm.codegen.build(fapi, target)
+        f = tvm.codegen.build_module(fapi, target)
         s = f.get_source()
         check(f)
diff --git a/tests/python/unittest/test_lang_basic.py b/tests/python/unittest/test_lang_basic.py
index e9594578acf5..63a109d3c8df 100644
--- a/tests/python/unittest/test_lang_basic.py
+++ b/tests/python/unittest/test_lang_basic.py
@@ -29,6 +29,13 @@ def test_ir():
     stmt = tvm.make.Evaluate(z)
     assert isinstance(stmt, tvm.stmt.Evaluate)
 
+def test_ir2():
+    x = tvm.var("n")
+    a = tvm.var("array", tvm.handle)
+    st = tvm.make.Store(a, x + 1, 1)
+    assert isinstance(st, tvm.stmt.Store)
+    assert(st.buffer_var == a)
+
 def test_let():
     x = tvm.var('x')
     y = tvm.var('y')
diff --git a/tests/python/unittest/test_lang_schedule.py b/tests/python/unittest/test_lang_schedule.py
index de6c5314095b..5fed5a23f750 100644
--- a/tests/python/unittest/test_lang_schedule.py
+++ b/tests/python/unittest/test_lang_schedule.py
@@ -86,8 +86,8 @@ def test_vectorize():
     xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
     s[T].vectorize(yi)
     s[T].unroll(xi)
-    UNROLL = tvm.collections.IterVar.Unrolled
-    VECTORIZE = tvm.collections.IterVar.Vectorized
+    UNROLL = tvm.schedule.IterVar.Unrolled
+    VECTORIZE = tvm.schedule.IterVar.Vectorized
     assert s[T].iter_var_attrs[xi].iter_type == UNROLL
     assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
diff --git a/tests/python/unittest/test_module_load.py b/tests/python/unittest/test_module_load.py
index 7003b3f92fe4..73de2f6c4fa7 100644
--- a/tests/python/unittest/test_module_load.py
+++ b/tests/python/unittest/test_module_load.py
@@ -36,7 +36,7 @@ def save_object(names):
                 tvm.make.Load(dtype, Ab.data, i) + 1,
                 i + 1))
     fapi = tvm.ir_pass.MakeAPI(stmt, "ramp", [Ab], 0)
-    m = tvm.codegen.build(fapi, "llvm")
+    m = tvm.codegen.build_module(fapi, "llvm")
     for name in names:
         m.save(name)
diff --git a/tests/verilog/integration/test_codegen_verilog.py b/tests/verilog/integration/test_codegen_verilog.py
index d8eba1e12188..7003fa936bd1 100644
--- a/tests/verilog/integration/test_codegen_verilog.py
+++ b/tests/verilog/integration/test_codegen_verilog.py
@@ -48,8 +48,8 @@ def check_target(device, host="stackvm"):
     if not tvm.codegen.enabled(device):
         return
     ctx = tvm.vpi(0)
-    mhost = tvm.codegen.build(fsplits[0], host)
-    mdev = tvm.codegen.build(fsplits[1:], device)
+    mhost = tvm.codegen.build_module(fsplits[0], host)
+    mdev = tvm.codegen.build_module(fsplits[1:], device)
     mhost.import_module(mdev)
     code = mdev.get_source()
     f = mhost.entry_func
diff --git a/tutorials/python/README.txt b/tutorials/python/README.txt
index 8c8349b44ef6..66b1b9a20ff1 100644
--- a/tutorials/python/README.txt
+++ b/tutorials/python/README.txt
@@ -1,6 +1,3 @@
-.. _tutorials-index:
-
-TVM Tutorials
-=============
-
-.. _notebook_examples:
+Tutorials
+=========
+These tutorials are generated by sphinx-gallery.
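Below is a minimal end-to-end sketch of the entry points renamed in this diff, assembled only from calls that appear above; the ``tvm.build`` argument order is assumed from the build.py hunk, and ``asnumpy`` is assumed from the runtime NDArray API.

.. code-block:: python

    import numpy as np
    import tvm

    # declare a vector-add computation
    n = tvm.var("n")
    A = tvm.placeholder((n,), name="A")
    B = tvm.compute(A.shape, lambda i: A(i) + 1, name="B")
    s = tvm.create_schedule(B.op)

    # guard on codegen availability, as the updated unit tests do
    if tvm.codegen.enabled("llvm"):
        f = tvm.build(s, [A, B], "llvm")  # argument order assumed
        ctx = tvm.cpu(0)
        a = tvm.nd.array(np.random.uniform(size=10).astype(A.dtype), ctx)
        b = tvm.nd.array(np.zeros(10, dtype=B.dtype), ctx)
        f(a, b)
        np.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)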