import mindspore_lite as mslite
from mindspore_lite import DataType
import numpy as np
import argparse
import os, time
from sklearn.metrics.pairwise import cosine_similarity


def ms_dtype_to_np_dtype(ms_tensor):
    """Map an MSLite tensor's DataType to the matching numpy dtype.

    Uses a lookup table instead of an if/elif chain. Exits the process with
    status 1 on an unsupported dtype, consistent with the script's other
    error handling.
    """
    dtype_map = {
        DataType.BOOL: bool,
        DataType.FLOAT32: np.float32,
        DataType.FLOAT16: np.float16,
        DataType.INT64: np.int64,
        DataType.INT32: np.int32,
        DataType.INT8: np.int8,
        DataType.UINT8: np.uint8,
    }
    np_dtype = dtype_map.get(ms_tensor.dtype)
    if np_dtype is None:
        print("Error: cannot convert ms_dtype: " + str(ms_tensor.dtype))
        exit(1)
    return np_dtype


def check_calib_contians_zero(calib_file_np):
    """Return True if the calibration array contains any exact zero.

    Used by the accuracy comparison to decide whether relative-bias metrics
    (which divide by the calibration values) can be computed safely.
    """
    # Vectorized check runs at C speed instead of a Python element loop.
    return bool(np.any(calib_file_np == 0))


def compare_bias(model_out_np, calib_file_np):
    """Compute relative bias (in percent) between model output and calib data.

    Absolute differences below 1e-8 are treated as exact matches (zeroed)
    before dividing by the calibration magnitudes.

    Returns [min_bias, max_bias, mean_bias] as percentages.
    NOTE: assumes calib_file_np contains no zeros; callers guard with
    check_calib_contians_zero first.
    """
    calib_flat = calib_file_np.flatten()
    model_flat = model_out_np.flatten()
    abs_diff = np.absolute(model_flat - calib_flat)
    # Vectorized replacement of the per-element thresholding loop.
    abs_diff[abs_diff < 1e-8] = 0
    # Compute the ratio once instead of three times.
    rel_bias = abs_diff / np.absolute(calib_flat)
    return [np.min(rel_bias) * 100, np.max(rel_bias) * 100, np.mean(rel_bias) * 100]


def compare_cos_similarity(model_out_np, calib_file_np):
    """Return the cosine similarity between the two flattened arrays.

    Pure-numpy equivalent of sklearn's cosine_similarity on 1xN rows,
    including its zero-vector handling: a zero-norm vector yields 0.0
    similarity instead of a division-by-zero NaN. Avoids the heavyweight
    sklearn call for a single vector pair.
    """
    a = model_out_np.flatten().astype(np.float64)
    b = calib_file_np.flatten().astype(np.float64)
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0.0 or norm_b == 0.0:
        # sklearn replaces zero norms with 1.0, so the similarity becomes 0
        return 0.0
    return float(np.dot(a, b) / (norm_a * norm_b))


def compare_allclose(model_out_np, calib_file_np):
    """Run np.allclose at a ladder of relative tolerances.

    The calib data is reshaped to the model output's shape, then compared
    from the loosest tolerance (10) down to the tightest (1e-5).

    Returns a list of booleans, one per tolerance.
    """
    reshaped_calib = calib_file_np.reshape(model_out_np.shape)
    tolerances = [10, 1, 0.1, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001]
    return [np.allclose(model_out_np, reshaped_calib, rtol=tol) for tol in tolerances]


# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description="Benchmark for MSLite MindIR model inference"
)
parser.add_argument(
    "-m",
    "--model",
    required=True,
    nargs="?",
    type=str,
    help="MSLite Ascend mindir file",
)
parser.add_argument(
    "-l",
    "--loop_times",
    required=False,
    nargs="?",
    type=int,
    help="Benchmark loop times",
    default=10,
)
parser.add_argument(
    "-p",
    "--provider",
    required=False,
    nargs="?",
    type=str,
    help='Ascend provider, support "ge" or by default "acl"',
    # NOTE(review): the empty default selects the "acl" path; "acl" itself is
    # deliberately not in choices, so users opt into "ge" only.
    default="",
    choices=["ge"],
)
parser.add_argument(
    "-c",
    "--config_file",
    required=False,
    nargs="?",
    type=str,
    help="Benchmark config file path",
    default="",
)
parser.add_argument(
    # Fixed: the option string was previously passed twice.
    "--warmup_loop_times",
    required=False,
    nargs="?",
    type=int,
    help="Benchmark warm up loop times",
    default=3,
)
parser.add_argument(
    "-i",
    "--in_data_file",
    required=False,
    nargs="?",
    type=str,
    help="Benchmark input data files, e.g. input0.bin,input1.bin",
    default="",
)
parser.add_argument(
    "-o",
    "--bench_data_file",
    required=False,
    nargs="?",
    type=str,
    help="Benchmark output data files for accuracy comparison, e.g. output0.bin,output1.bin",
    default="",
)
parser.add_argument(
    "-s",
    "--input_shapes",
    required=False,
    nargs="?",
    type=str,
    help="Dynamic input model shapes, e.g. 1,2,3,4:5,6,7,8",
    default="",
)
parser.add_argument(
    "-save_input_output",
    "--save_input_output",
    required=False,
    nargs="?",
    type=str,
    help="Save input and output .bin file, e.g. true/false",
    # Default as a string for consistency with the declared choices; the
    # script compares against the string "true" downstream either way.
    default="false",
    choices=["true", "false"],
)
parser.add_argument(
    "-device",
    "--device",
    required=True,
    nargs="?",
    type=str,
    help="Specify inference platform: CPU, Ascend",
    choices=["CPU", "Ascend"],
)

# ---------------------------------------------------------------------------
# Resolve CLI options and build the inference context and model.
# ---------------------------------------------------------------------------
args = parser.parse_args()
source_file = args.model
# argparse stores the flag as the string "true"/"false"; convert to bool once.
save_io_switch = args.save_input_output == "true"
device_platform = args.device

# context information
context = mslite.Context()
if device_platform == "Ascend":
    context.target = ["ascend"]
    # Honor ASCEND_DEVICE_ID from the environment, defaulting to device 0.
    context.ascend.device_id = int(os.environ.get("ASCEND_DEVICE_ID", 0))
    context.ascend.provider = args.provider
elif device_platform == "CPU":
    context.target = ["cpu"]
else:
    # argparse choices should prevent this branch; fail loudly just in case.
    print("Error: unsupported device platform " + device_platform)
    exit(1)
config = args.config_file

print("run benchmark on model: " + str(source_file))
print("Platform: " + str(device_platform))
print("NPU device id: " + str(context.ascend.device_id))
print("ConfigFilePath: " + config)
print("WarmUpLoopCount: " + str(args.warmup_loop_times))
print("LoopCount: " + str(args.loop_times))
print("InDataPath: " + str(args.in_data_file))
print("benchmarkDataFile: " + str(args.bench_data_file))

# init model object
model = mslite.Model()

# load mindir from file
model.build_from_file(source_file, mslite.ModelType.MINDIR, context, config_path=config)

input_files = []
inputs = model.get_inputs()

# Optionally resize dynamic-shape inputs: "1,2,3,4:5,6,7,8" gives one
# comma-separated shape per input, colon-separated between inputs.
if args.input_shapes != "":
    new_shape = []
    for shape_str in args.input_shapes.split(":"):
        new_shape.append([int(dim) for dim in shape_str.split(",")])
    print("Resize input: " + str(new_shape))
    model.resize(inputs, new_shape)

if args.in_data_file != "":
    input_files = args.in_data_file.split(",")
    # get inputs from model
    if len(inputs) != len(input_files):
        print(
            "Error: Provided number of input files do not match model inputs size: "
            + str(len(inputs))
        )
        exit(1)

input_tensors = []
# Fill each model input either from the provided .bin files or with random
# data, and build a parallel list of mslite.Tensor objects for the
# device-only benchmark (placed on the NPU when running on Ascend).
for i in range(len(inputs)):
    model_input = inputs[i]  # renamed: "input" shadowed the builtin
    np_dtype = ms_dtype_to_np_dtype(model_input)
    if len(input_files) != 0:
        input_np = np.fromfile(input_files[i], dtype=np_dtype)
    else:
        input_np = np.random.rand(*model_input.shape).astype(np_dtype)
    model_input.set_data_from_numpy(input_np)
    # Reshape once; previously duplicated in both platform branches.
    input_np = input_np.reshape(model_input.shape)
    if device_platform == "Ascend":
        device_name = "ascend:" + str(context.ascend.device_id)
        input_tensors.append(
            mslite.Tensor(input_np, input_np.shape, model_input.dtype, device=device_name)
        )
    else:
        input_tensors.append(mslite.Tensor(input_np, input_np.shape, model_input.dtype))

    if save_io_switch:
        bin_file_name = source_file + "_in_" + str(i) + ".bin"
        print("Saving input %d to file: %s" % (i, bin_file_name))
        input_np.tofile(bin_file_name)

# Run model warmup. When calib data is provided, the first iteration also
# performs the accuracy comparison and exits the script (benchmark timing
# loops below are skipped in that mode).
print("Running warm up loops...")
for i in range(args.warmup_loop_times):
    outputs = model.predict(inputs)
    if save_io_switch:  # when save bin option is turned on
        for j in range(len(outputs)):
            output_np = outputs[j].get_data_to_numpy()
            bin_file_name = source_file + "_out_" + str(j) + ".bin"
            print("Saving output %d to file: %s" % (j, bin_file_name))
            output_np.tofile(bin_file_name)
        # Only save outputs once, on the first warmup iteration
        # (previously reset inside the inner loop).
        save_io_switch = False
    if args.bench_data_file != "":  # when user provided calib data file
        # compare differences
        output_files = args.bench_data_file.split(",")
        # Validate the file count once, hoisted out of the per-output loop.
        if len(outputs) != len(output_files):
            print(
                "Error: Provided number of output files do not match model outputs size: "
                + str(len(outputs))
            )
            exit(1)
        rtol_accs = []
        cos_accs = []
        for j in range(len(outputs)):
            model_out_np = outputs[j].get_data_to_numpy()
            calib_file_np = np.fromfile(
                output_files[j], dtype=ms_dtype_to_np_dtype(outputs[j])
            )
            model_flat = model_out_np.flatten()
            calib_flat = calib_file_np.flatten()
            print("Model output %d: %s" % (j, model_flat[: min(5, len(model_flat))]))
            # Fixed: the calib preview was sliced by the model output's
            # length instead of the calib file's own length.
            print("Calib file %d: %s" % (j, calib_flat[: min(5, len(calib_flat))]))
            cos_accs.append(compare_cos_similarity(model_out_np, calib_file_np))
            if check_calib_contians_zero(calib_file_np):
                # Relative bias is undefined when calib contains zeros.
                rtol_accs.append(None)
            else:
                rtol_accs.append(compare_bias(model_out_np, calib_file_np))

        print("\n================ Comparing Output data ================")
        for j in range(len(outputs)):
            if rtol_accs[j] is not None:
                print(
                    "Relative bias of node/tensor %d : Min: %0.3f%%, Max: %0.3f%%, Avg: %0.3f%%"
                    % (j, rtol_accs[j][0], rtol_accs[j][1], rtol_accs[j][2])
                )
            else:
                print(
                    "Warning: calib file contains 0, cannot calculate mean bias for node %d..."
                    % (j)
                )
        print("=======================================================\n")

        print("================ Comparing Output data ================")
        for j in range(len(outputs)):
            print("Mean cosine distance of node/tensor %s : %0.8f" % (j, cos_accs[j]))
        print("=======================================================")
        print(
            "Mean cosine distance of all nodes/tensors : %0.8f\n"
            % (sum(cos_accs) / len(cos_accs))
        )
        exit(0)

time_diffs = []
outputs = []
output_tensors = []
# Run end-to-end inference benchmark (includes host<->device data movement).
print("Running benchmark loops...")
for i in range(args.loop_times):
    start = time.time()
    outputs = model.predict(inputs)
    diff = (time.time() - start) * 1000
    time_diffs.append(diff)

# Pre-allocate device-resident output tensors for the device-only benchmark.
# Fixed: these NPU-placed tensors were previously created even on CPU runs,
# though they are only consumed by the Ascend-only loop below.
if device_platform == "Ascend":
    device_name = "ascend:" + str(context.ascend.device_id)
    for j in range(len(outputs)):
        output_tensors.append(
            mslite.Tensor(
                shape=outputs[j].get_data_to_numpy().shape,
                dtype=outputs[j].dtype,
                device=device_name,
            )
        )

# Device-only benchmark: feed pre-placed device tensors so host<->device
# copies are excluded from the measured time (Ascend only).
if device_platform == "Ascend":
    print("Running device only benchmark loops...")
    infer_only_time_diffs = []
    for _ in range(args.loop_times):
        tic = time.time()
        outputs = model.predict(input_tensors, output_tensors)
        infer_only_time_diffs.append((time.time() - tic) * 1000)
    avg_infer_only = sum(infer_only_time_diffs) / len(infer_only_time_diffs)
    print("================ Infer only performance ================")
    print(
        "Model = %s, MinRunTime = %0.6f ms, MaxRuntime = %0.6f ms, AvgRunTime = %0.6f ms"
        % (
            source_file,
            min(infer_only_time_diffs),
            max(infer_only_time_diffs),
            avg_infer_only,
        )
    )

# End-to-end numbers include input binding and output retrieval overhead.
avg_end_to_end = sum(time_diffs) / len(time_diffs)
print("================ End to end performance ================")
print(
    "Model = %s, MinRunTime = %0.6f ms, MaxRuntime = %0.6f ms, AvgRunTime = %0.6f ms"
    % (source_file, min(time_diffs), max(time_diffs), avg_end_to_end)
)
print("========================================================")
print("Run Benchmark Success!")
