import os
import json
import argparse
import traceback

import tensorflow as tf
import numpy as np
from tensorflow.python.platform import gfile

# Pin this process to GPU #1 (must be set before TensorFlow initializes CUDA).
os.environ['CUDA_VISIBLE_DEVICES'] = '1'


def run_model(model_path, feed_dict, output_name="output:0"):
    """Load a frozen TF GraphDef from *model_path* and run one inference.

    Args:
        model_path: Path to a serialized frozen GraphDef (``.pb``) file.
        feed_dict: Mapping from input tensor names (e.g. ``"x:0"``) to the
            numpy arrays to feed.
        output_name: Name of the tensor to fetch. Defaults to ``"output:0"``,
            matching the graphs this runner was written for.

    Returns:
        The numpy value produced by evaluating ``output_name``.
    """
    # Reset the default graph so repeated calls don't accumulate nodes
    # from previously loaded models.
    tf.compat.v1.reset_default_graph()
    # Cap per-process GPU memory so multiple runners can share one device.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    # Context managers guarantee the session (and its GPU memory) and the
    # file handle are released even if sess.run raises -- the original
    # created the session without ever closing it.
    with tf.compat.v1.Session(config=config) as sess:
        # tf.io.gfile.GFile is the supported replacement for the
        # deprecated tensorflow.python.platform.gfile.FastGFile.
        with tf.io.gfile.GFile(model_path, "rb") as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
        # After reset_default_graph(), the session's graph is the default
        # graph, so the import lands in sess.graph.  (The original's bare
        # `sess.graph.as_default()` was a no-op: it built a context manager
        # and discarded it without `with`.)
        tf.import_graph_def(graph_def, name="")
        op = sess.graph.get_tensor_by_name(output_name)
        return sess.run(op, feed_dict=feed_dict)


if __name__ == "__main__":
    # Batch-run every frozen-graph test case listed in the input JSON and
    # persist the collected results for later differential comparison.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", "-m", default="/home/ubuntu/onnx_samples/onnx_transformer/source")
    parser.add_argument("--seed_path", "-s", default="/home/ubuntu/onnx_samples/onnx_transformer/source")
    parser.add_argument("--input_path", "-i", default="/home/ubuntu/onnx_samples/onnx_transformer/source")
    parser.add_argument("--result_path", "-r", default="/home/ubuntu/onnx_samples/onnx_transformer/source")
    parser.add_argument("--input_name", default="tf.math.abs_inputs.json")
    parser.add_argument("--seed_name", default="tf.math.abs_seeds.npz")
    args = parser.parse_args()

    # Recover the API name by stripping the "_inputs.json" suffix (12 chars).
    api_name = args.input_name[:-12]
    print("[Run TF Model] Api Name: " + api_name)

    full_input_path = os.path.join(args.input_path, args.input_name)
    full_seed_path = os.path.join(args.seed_path, args.seed_name)
    with open(full_input_path, "r") as f:
        input_dicts = json.load(f)

    result_dict = {}
    # Open the seed .npz ONCE for the whole run -- the original re-opened it
    # on every loop iteration and never closed the file handle.
    with np.load(full_seed_path) as nd_arrays:
        for input_dict in input_dicts:
            file_name = input_dict["test_case_file_name"] + ".pb"
            print(" >>>>>>> Running model " + file_name + " <<<<<<<<")
            model_file = os.path.join(args.model_path, file_name)
            # Skip test cases whose frozen graph was never generated.
            if not os.path.exists(model_file):
                print("[Run TF Model] Model " + file_name + " doesn't exist.")
                continue
            # Build the feed dict: map each placeholder name ("<param>:0")
            # to the seed array that the test case references by key.
            # (Loop variable renamed from `input`, which shadowed the builtin.)
            feed_dict = {}
            for entry in input_dict["inputs"]:
                for param in entry:
                    feed_dict[param + ":0"] = nd_arrays[entry[param]]
            try:
                result = run_model(model_file, feed_dict)
            except Exception:
                # Best-effort batch runner: log the traceback and keep going
                # so one broken model doesn't abort the whole sweep.
                print("[Run TF Model] Exception occurred~:\n")
                traceback.print_exc()
                continue
            result_dict[input_dict["test_case_file_name"]] = result
            print("Result: ")
            print(result)
            print("\nShape: " + str(result.shape))

    # Persist: .npz when any result is an ndarray, plain JSON otherwise.
    is_tensor_result = any(isinstance(val, np.ndarray) for val in result_dict.values())
    if is_tensor_result:
        np.savez(os.path.join(args.result_path, api_name + "_results"), **result_dict)
    else:
        with open(os.path.join(args.result_path, api_name + "_results.json"), "w+") as f:
            json.dump(result_dict, f)
