import os
import nn
import tvm
import argparse
import mxnet as mx
import numpy as np
from tvm import autotvm
import nnvm.compiler
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import tvm.contrib.graph_runtime as runtime

def parse_args(argv=None):
    """Parse command-line options for the x86 auto-tuning script.

    Args:
        argv: optional list of argument strings; ``None`` means use
            ``sys.argv[1:]`` (the previous behavior).

    Returns:
        argparse.Namespace with the parsed options plus two derived fields:
        ``shape`` — the NCHW input shape ``[1, 3, H, W]`` — and ``log_file``
        — the path where tuning records are written.

    Side effects:
        Sets the ``TVM_NUM_THREADS`` environment variable, which TVM reads
        at runtime to size its thread pool.
    """
    parser = argparse.ArgumentParser(description='Auto-tuning CNN for x86 CPU')
    parser.add_argument('network', type=str)
    # BUGFIX: argparse ignores `default` on a required positional; nargs='?'
    # makes `size` optional so the '400,300' default actually takes effect.
    parser.add_argument('size', type=str, nargs='?', default='400,300')
    parser.add_argument('--target', type=str, default='llvm -mcpu=core-avx2')
    parser.add_argument('--dtype', type=str, default='float32')
    parser.add_argument('--tuner', type=str, default='random', help='random or xgb')
    parser.add_argument('-j', '--num_threads', type=int, default=4)
    args = parser.parse_args(argv)
    # Batch-1, 3-channel NCHW shape, e.g. '400,300' -> [1, 3, 400, 300].
    args.shape = [1, 3] + list(map(int, args.size.split(',')))
    os.environ["TVM_NUM_THREADS"] = str(args.num_threads)
    args.log_file = 'models/autotvm/{}.{}.log'.format(args.network, args.size)
    return args

# You can skip the implementation of this function for this tutorial.
def tune_kernels(tasks, target,
                 measure_option,
                 tuner='gridsearch',
                 early_stopping=None,
                 log_filename='tuning.log'):

    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i+1, len(tasks))

        # converting conv2d tasks to conv2d_NCHWc tasks
        op_name = tsk.workload[0]
        if op_name == 'conv2d':
            func_create = 'topi_x86_conv2d_NCHWc'
        elif op_name == 'depthwise_conv2d_nchw':
            func_create = 'topi_x86_depthwise_conv2d_NCHWc_from_nchw'
        else:
            raise ValueError("Tuning {} is not supported on x86".format(op_name))

        task = autotvm.task.create(func_create, args=tsk.args,
                                   target=target, template_key='direct')
        task.workload = tsk.workload

        # create tuner
        if tuner == 'xgb' or tuner == 'xgb-rank':
            tuner_obj = XGBTuner(task, loss_type='rank')
        elif tuner == 'ga':
            tuner_obj = GATuner(task, pop_size=50)
        elif tuner == 'random':
            tuner_obj = RandomTuner(task)
        elif tuner == 'gridsearch':
            tuner_obj = GridSearchTuner(task)
        else:
            raise ValueError("Invalid tuner: " + tuner)

        # do tuning
        n_trial=len(task.config_space)
        tuner_obj.tune(n_trial=n_trial,
                       early_stopping=early_stopping,
                       measure_option=measure_option,
                       callbacks=[
                           autotvm.callback.progress_bar(n_trial, prefix=prefix),
                           autotvm.callback.log_to_file(log_filename)])

def autotune_evalute(args):
    """Auto-tune the network's conv kernels, compile with the best records,
    and benchmark inference on CPU.

    NOTE(review): name keeps the original spelling ("evalute") so existing
    callers are unaffected.

    Args:
        args: namespace from ``parse_args`` — uses ``network``, ``shape``,
            ``dtype``, ``target``, ``tuner``, and ``log_file``.
    """
    # Tuning configuration forwarded to tune_kernels; LocalRunner repeats
    # each measurement until at least 1000 ms have elapsed for stability.
    tuning_option = {
    'log_filename': args.log_file,
    'tuner': args.tuner,
    'target': args.target,
    'early_stopping': None,
    'measure_option': autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(number=10, repeat=1, min_repeat_ms=1000))
    }
    # Build the MXNet symbol: hybridize + one forward pass traces the graph
    # so it can be exported to nnvm below.
    block = nn.get_net(args.network)
    block.initialize()
    block.hybridize()
    data_mx = mx.random.randn(*args.shape).astype(args.dtype)
    block(data_mx)
    net, params = nnvm.frontend.from_mxnet(block)
    # Extract only conv2d workloads as tuning tasks.
    tasks = autotvm.task.extract_from_graph(net, target=args.target,
                    shape={'data': args.shape}, dtype=args.dtype,
                    symbols=(nnvm.sym.conv2d,))
    # run tuning tasks
    print("Tuning...")
    tune_kernels(tasks, **tuning_option)

    # compile kernels with history best records
    with autotvm.apply_history_best(args.log_file):
        print("Compile...")
        with nnvm.compiler.build_config(opt_level=3):
            graph, lib, params = nnvm.compiler.build(net, target=args.target,
                    shape={'data': args.shape}, params=params, dtype=args.dtype)

        # upload parameters to device
        ctx = tvm.cpu()
        data_tvm = tvm.nd.array(data_mx.asnumpy())
        module = runtime.create(graph, lib, ctx)
        module.set_input('data', data_tvm)
        module.set_input(**params)

        # evaluate
        print("Evaluate inference time cost...")
        ftimer = module.module.time_evaluator('run', ctx, number=100, repeat=3)
        prof_res = np.array(ftimer().results) * 1000  # convert to millisecond
        print("Mean inference time (std dev): %.2f ms (%.2f ms)" %
              (np.mean(prof_res), np.std(prof_res)))

if __name__ == '__main__':
    # Entry point: parse CLI options, then tune and benchmark the network.
    cli_args = parse_args()
    autotune_evalute(cli_args)

######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract features from them.
# So a high performance CPU is recommended.
# One sample output is listed below.
#
# .. code-block:: bash
#
#    Extract tasks...
#    Tuning...
#    [Task  1/12]  Current/Best:  598.05/2497.63 GFLOPS | Progress: (252/252) | 1357.95 s Done.
#    [Task  2/12]  Current/Best:  522.63/2279.24 GFLOPS | Progress: (784/784) | 3989.60 s Done.
#    [Task  3/12]  Current/Best:  447.33/1927.69 GFLOPS | Progress: (784/784) | 3869.14 s Done.
#    [Task  4/12]  Current/Best:  481.11/1912.34 GFLOPS | Progress: (672/672) | 3274.25 s Done.
#    [Task  5/12]  Current/Best:  414.09/1598.45 GFLOPS | Progress: (672/672) | 2720.78 s Done.
#    [Task  6/12]  Current/Best:  508.96/2273.20 GFLOPS | Progress: (768/768) | 3718.75 s Done.
#    [Task  7/12]  Current/Best:  469.14/1955.79 GFLOPS | Progress: (576/576) | 2665.67 s Done.
#    [Task  8/12]  Current/Best:  230.91/1658.97 GFLOPS | Progress: (576/576) | 2435.01 s Done.
#    [Task  9/12]  Current/Best:  487.75/2295.19 GFLOPS | Progress: (648/648) | 3009.95 s Done.
#    [Task 10/12]  Current/Best:  182.33/1734.45 GFLOPS | Progress: (360/360) | 1755.06 s Done.
#    [Task 11/12]  Current/Best:  372.18/1745.15 GFLOPS | Progress: (360/360) | 1684.50 s Done.
#    [Task 12/12]  Current/Best:  215.34/2271.11 GFLOPS | Progress: (400/400) | 2128.74 s Done.
#    Compile...
#    Evaluate inference time cost...
#    Mean inference time (std dev): 3.16 ms (0.03 ms)
