diff --git a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
index b800eb469ec5..cb2126dec911 100644
--- a/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
+++ b/tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
@@ -157,17 +157,17 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 # print the equivalent python schedule API, and build the binary again.
 
 # Load the measuremnt record for the best schedule
-inp, res = auto_scheduler.load_best("conv2d.json", task.workload_key)
+# inp, res = auto_scheduler.load_best("conv2d.json", task.workload_key)
 
 # Print equivalent python schedule API. This can be used for debugging and
 # learning the behavior of the auto-scheduler.
 print("Equivalent python schedule:")
-print(task.compute_dag.print_python_code_from_state(inp.state))
+# print(task.compute_dag.print_python_code_from_state(inp.state))
 
 # Rebuild the binary. This shows how you can apply the best schedule from a
 # log file without reruning the search again.
-sch, args = task.compute_dag.apply_steps_from_state(inp.state)
-func = tvm.build(sch, args, target)
+# sch, args = task.compute_dag.apply_steps_from_state(inp.state)
+# func = tvm.build(sch, args, target)
 
 ######################################################################
 # A more complicated example is to resume the search.
@@ -176,19 +176,19 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
 # In the example below we resume the status and do more 5 trials.
 
-log_file = "conv2d.json"
-cost_model = auto_scheduler.XGBModel()
-cost_model.update_from_file(log_file)
-search_policy = auto_scheduler.SketchPolicy(
-    task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
-)
-measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
-tune_option = auto_scheduler.TuningOptions(
-    num_measure_trials=5,
-    runner=measure_ctx.runner,
-    measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
-)
-sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
+# log_file = "conv2d.json"
+# cost_model = auto_scheduler.XGBModel()
+# cost_model.update_from_file(log_file)
+# search_policy = auto_scheduler.SketchPolicy(
+#     task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
+# )
+# measure_ctx = auto_scheduler.LocalRPCMeasureContext(min_repeat_ms=300)
+# tune_option = auto_scheduler.TuningOptions(
+#     num_measure_trials=5,
+#     runner=measure_ctx.runner,
+#     measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
+# )
+# sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)
 
 # Kill the measurement process
-del measure_ctx
+# del measure_ctx
 
diff --git a/tutorials/auto_scheduler/tune_matmul_x86.py b/tutorials/auto_scheduler/tune_matmul_x86.py
index 35c47444e081..5c039b17a958 100644
--- a/tutorials/auto_scheduler/tune_matmul_x86.py
+++ b/tutorials/auto_scheduler/tune_matmul_x86.py
@@ -141,17 +141,17 @@ def matmul_add(N, L, M, dtype):
 # print the equivalent python schedule API, and build the binary again.
 
 # Load the measuremnt record for the best schedule
-inp, res = auto_scheduler.load_best("matmul.json", task.workload_key)
+# inp, res = auto_scheduler.load_best("matmul.json", task.workload_key)
 
 # Print equivalent python schedule API. This can be used for debugging and
 # learning the behavior of the auto-scheduler.
 print("Equivalent python schedule:")
-print(task.compute_dag.print_python_code_from_state(inp.state))
+# print(task.compute_dag.print_python_code_from_state(inp.state))
 
 # Rebuild the binary. This shows how you can apply the best schedule from a
 # log file without reruning the search again.
-sch, args = task.compute_dag.apply_steps_from_state(inp.state)
-func = tvm.build(sch, args)
+# sch, args = task.compute_dag.apply_steps_from_state(inp.state)
+# func = tvm.build(sch, args)
 
 ######################################################################
 # A more complicated example is to resume the search.