Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 18 additions & 18 deletions tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,17 +157,17 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# print the equivalent python schedule API, and build the binary again.

# Load the measurement record for the best schedule
inp, res = auto_scheduler.load_best("conv2d.json", task.workload_key)
# inp, res = auto_scheduler.load_best("conv2d.json", task.workload_key)

# Print equivalent python schedule API. This can be used for debugging and
# learning the behavior of the auto-scheduler.
print("Equivalent python schedule:")
print(task.compute_dag.print_python_code_from_state(inp.state))
# print(task.compute_dag.print_python_code_from_state(inp.state))

# Rebuild the binary. This shows how you can apply the best schedule from a
# log file without rerunning the search again.
sch, args = task.compute_dag.apply_steps_from_state(inp.state)
func = tvm.build(sch, args, target)
# sch, args = task.compute_dag.apply_steps_from_state(inp.state)
# func = tvm.build(sch, args, target)

######################################################################
# A more complicated example is to resume the search.
Expand All @@ -176,19 +176,19 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# In the example below we resume the status and do 5 more trials.


log_file = "conv2d.json"
cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
search_policy = auto_scheduler.SketchPolicy(
task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
)
measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=5,
runner=measure_ctx.runner,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
# log_file = "conv2d.json"
# cost_model = auto_scheduler.XGBModel()
# cost_model.update_from_file(log_file)
# search_policy = auto_scheduler.SketchPolicy(
# task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
# )
# measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
# tune_option = auto_scheduler.TuningOptions(
# num_measure_trials=5,
# runner=measure_ctx.runner,
# measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
# )
# sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)

# Kill the measurement process
del measure_ctx
# del measure_ctx
8 changes: 4 additions & 4 deletions tutorials/auto_scheduler/tune_matmul_x86.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,17 +141,17 @@ def matmul_add(N, L, M, dtype):
# print the equivalent python schedule API, and build the binary again.

# Load the measurement record for the best schedule
inp, res = auto_scheduler.load_best("matmul.json", task.workload_key)
# inp, res = auto_scheduler.load_best("matmul.json", task.workload_key)

# Print equivalent python schedule API. This can be used for debugging and
# learning the behavior of the auto-scheduler.
print("Equivalent python schedule:")
print(task.compute_dag.print_python_code_from_state(inp.state))
# print(task.compute_dag.print_python_code_from_state(inp.state))

# Rebuild the binary. This shows how you can apply the best schedule from a
# log file without rerunning the search again.
sch, args = task.compute_dag.apply_steps_from_state(inp.state)
func = tvm.build(sch, args)
# sch, args = task.compute_dag.apply_steps_from_state(inp.state)
# func = tvm.build(sch, args)

######################################################################
# A more complicated example is to resume the search.
Expand Down