from sys import stderr
from typing import Dict, List
import os
#from sklearn import metrics
import metrics
from torch.functional import Tensor
from my_utils.parsing_util import Parser
import argparse
from loguru import logger
from my_utils.angr_util import AngrUtil
import torch
import numpy as np
import gensim
from data.tvm_dataset import shape_embedding
from tvm_graph.node import Node
# from utils.recover_util import RecoverUtil
from my_utils.recover_util_keras import RecoverUtilKeras
from my_utils.validate_util import ValidateUtil
import pickle as pk
import tvm
import networkx as nx

from data.tvm_dataset import get_clean_name, atom_list


def get_weights_dict(so_path: str) -> Dict:
    """Extract model weights from a compiled .so file via a modified TVM runtime.

    The patched runtime exposes a ``get_params`` packed function. The first
    call yields a comma-separated list of parameter names; each subsequent
    call with a parameter name returns that parameter's NDArray.

    :param so_path: Path of the .so file
    :type so_path: str
    :return: A dict of weights where key is the name and value is a numpy array
    :rtype: Dict
    """
    lib = tvm.runtime.load_module(so_path)
    paramFunc = lib["get_params"]
    empty_str = ""
    # First call initializes paramFunc and returns the parameter names joined
    # by ",". An arbitrary string argument MUST be passed here, otherwise no
    # return value is produced.
    param_names = paramFunc(empty_str)
    param_dict = {}
    for pname in param_names.split(','):
        # Skip empty entries (e.g. from a trailing comma or an empty name
        # string): calling paramFunc("") again would re-trigger the name query
        # and return a string, crashing on .asnumpy() below.
        if not pname:
            continue
        # Calls 2..n each fetch one parameter tensor by name.
        tmp: tvm.runtime.NDArray = paramFunc(pname)
        param_dict[pname] = tmp.asnumpy()
    return param_dict


def get_shape(node: Node) -> Tensor:
    """Encode a node's input and output shapes as one embedding vector.

    :param node: Our Node class
    :type node: Node
    :return: An embedding tensor (its shape is always [12*5*2])
    :rtype: Tensor
    """
    # Only the first input shape is encoded; nodes with no inputs get an
    # empty-shape embedding instead.
    first_in_shape = node.in_shape[0] if len(node.in_shape) > 0 else []
    return torch.cat([shape_embedding(first_in_shape),
                      shape_embedding(node.out_shape)])


def get_embedding(func_data: Dict, node: Node, w2v_model="word2vec-train-v2.model") -> List:
    """Convert the raw data to the word embedding by word2vec.

    Builds, for every non-empty basic block in ``func_data['blocks']``, a
    [1, n_tokens, 200] tensor of word2vec embeddings plus a parallel tensor
    encoding the numeric constants found in the block.

    :param func_data: A data dict, which contains basic block data and trace data;
        assumes ``func_data['blocks']`` is a list of token lists — TODO confirm
        against the IDA/angr extraction side
    :type func_data: Dict
    :param node: Our Node class, it has some input/output shape info
    :type node: Node
    :param w2v_model: The path of the word model, defaults to "word2vec-train-v2.model"
    :type w2v_model: str, optional
    :return: Three kinds of data (returned as a tuple despite the annotation):
        [instruction sequence, value sequence, shape sequence]
    :rtype: List
    """
    # Both lists hold a single "trace" (the inner list); each non-empty block
    # contributes one tensor to it.
    data = [[]]
    value_data = [[]]
    embedding_dim = 200

    model = gensim.models.Word2Vec.load(w2v_model)
    # IDA-style data-segment / symbol token prefixes: any token containing one
    # of these is collapsed to the canonical word before the w2v lookup.
    cs_words = [
        "csqword", "csdword", "cspword", "csxmmword", "csymmword", "csunk", "csoff", "csbyte", "aAssertFail", "csasc"
    ]
    # Shape embedding for the node itself, with a leading batch dimension.
    shape_data = get_shape(node).unsqueeze(0)

    for b in func_data['blocks']:
        if len(b) == 0:
            continue
        # One zero-initialized row per token; rows for OOV words stay zero.
        data[-1].append(torch.zeros(1, len(b), embedding_dim))

        # Number of numeric tokens (decimal or hex) in this block.
        value_count = sum([t.isdigit() or "0x" in t for t in b])

        if value_count == 0:
            # Keep a non-empty placeholder so downstream code always sees a tensor.
            value_data[-1].append(torch.zeros(1, 1, embedding_dim))
        else:
            value_data[-1].append(torch.zeros(1, value_count, embedding_dim))
        value_pos = 0
        for pos, t in enumerate(b):
            # Collapse data-segment tokens to their canonical cs_word.
            for cs_word in cs_words:
                if cs_word in t:
                    t = cs_word
            if t.isdigit():
                # Sign flag: all-zeros for non-negative, all-ones for negative.
                # (isdigit() tokens are non-negative, so the ones-branch is
                # effectively dead here; it mirrors the hex branch below.)
                vector = torch.zeros(embedding_dim) if int(t) >= 0 else torch.ones(embedding_dim)
                # First 15 dims encode the magnitude.
                vector[:15] = shape_embedding([abs(int(t))], max_length=15, max_dim=1)
                value_data[-1][-1][:, value_pos, :] = vector
                # Replace the literal with the generic "num" token for w2v.
                t = "num"
                value_pos += 1
            elif "0x" in t:
                # NOTE(review): assumes the whole token is a hex literal;
                # int(t, 16) raises ValueError for tokens that merely contain
                # "0x" — confirm tokenizer guarantees this.
                tmp = int(t, 16)
                vector = torch.zeros(embedding_dim) if tmp >= 0 else torch.ones(embedding_dim)
                vector[:15] = shape_embedding([abs(tmp)], max_length=15, max_dim=1)
                value_data[-1][-1][:, value_pos, :] = vector
                t = "num"
                value_pos += 1

            if t not in model.wv:
                # Out-of-vocabulary: leave the zero row in place.
                logger.warning("Word {} is Not Found!".format(t))
                continue
            # np.copy avoids sharing memory with gensim's internal matrix.
            data[-1][-1][:, pos, :] = torch.from_numpy(np.copy(model.wv[t]))

    return data, value_data, shape_data


def analyze_so_file(so_path):
    """Disassemble a shared object with angr and tokenize every function.

    :param so_path: path of the .so file to analyze
    :return: dict mapping function name -> {'blocks': list of token lists}
    """
    util = AngrUtil(so_path)
    util.identify_function()
    logger.info(util.function_names)

    trace_data = {}
    for fname in util.function_names:
        graph = util.get_function_graph(fname)
        block_map = util.contruct_block_dict(util.get_func_blocks_by_name(fname))
        # Extend the graph (and its block map) before tokenizing.
        graph, block_map = util.try_extend_graph(graph, block_map)

        # One token list per basic block; random-walk trace collection was
        # removed in favor of static per-block tokens.
        trace_data[fname] = {
            'blocks': [util.get_block_tokens(block_map[key]) for key in block_map]
        }

    return trace_data


def run(args):
    """End-to-end model recovery pipeline.

    Loads trace data and weights, classifies every graph node's atomic
    operator, optionally corrects predictions with a GCN and rule checks,
    then converts the graph to Keras and writes generated model code.

    :param args: parsed CLI namespace; reads args.json, args.so,
        args.is_correct and args.iter_correct_num
    """
    json_path = args.json
    so_path = args.so

    # trace_data = analyze_so_file(so_path)
    # Directly load the ida-pro result for the trace data instead of using the angr
    trace_data = pk.load(open(so_path, "rb"))

    logger.info("Loading the weight ...")
    # NOTE(review): so_path[:-4] strips a 4-char suffix — this assumes args.so
    # actually points at the IDA pickle (e.g. "<model>.so.pkl") sitting next to
    # the real .so; confirm the expected file layout.
    weight_data = get_weights_dict(so_path[:-4])
    with open("tmp_weight.pkl", "wb") as f:
        pk.dump(weight_data, f)
    logger.info("Saved the weight!")

    # Parse the TVM json into our node/graph representation.
    node_arr, shape_arr = Parser.parse_graph(json_path)
    graph = Parser.convert(node_arr, shape_arr)

    logger.debug(len(graph.node_set))

    gt = []    # ground-truth atomic-op indices (derived from node names)
    pred = []  # predicted atomic-op indices (parallel to gt)

    for node in graph.node_set:
        logger.debug("Processing node [{}]".format(node.name))
        input_data = None
        # Skip nodes we cannot or need not classify.
        if node.unknown:
            continue
        if node.is_extra:
            continue
        if "nop" in node.name:
            continue
        # Fix the name of node: drop the "-<suffix>" part to match trace keys.
        tmp_name = node.name.split("-")[0]
        gt_atom_type = get_clean_name(tmp_name)

        # TVM may append "_compute_" to the symbol name; try both keys.
        if tmp_name not in trace_data:
            if tmp_name + "_compute_" not in trace_data:
                logger.error("Unseen Function Name: {}!".format(tmp_name))
                node.unknown = True
            else:
                # input_data = get_embedding(trace_data[tmp_name + "_compute_"], node, "word2vec-train-ida.model")
                input_data = get_embedding(trace_data[tmp_name + "_compute_"], node, "word2vec-train-ida-arm.model")
                node.bb_data = [input_data[0]]
                # input_data_old = get_embedding(trace_data[node.name + "_compute_"], node, "word2vec-train-v2.model")
        else:
            input_data = get_embedding(trace_data[tmp_name], node, "word2vec-train-ida.model")
            # input_data_old = get_embedding(trace_data[node.name], node)

        logger.debug(node.in_shape)
        logger.debug(node.out_shape)

        if not node.is_extra:
            # Infer function type and parameters
            Parser.analyzing_node_super(node, input_data)
            gt.append(atom_list.index(gt_atom_type))
            pred.append(atom_list.index(node.atomic_type))
            logger.debug("GroundTruth: " + gt_atom_type)
            logger.debug("Predict: " + node.atomic_type)
            # if node.atomic_type == "minimum" or node.atomic_type == "negative":
            #     logger.debug(trace_data[tmp_name + "_compute_"])

            # TODO Apply some rules to classify the wrong prediction
            # Parser.check_wrong_pred(node)
            # Free the per-node embedding tensors before the next iteration.
            del input_data
            torch.cuda.empty_cache()

    logger.debug(gt)
    logger.debug(pred)
    logger.info("[Before Correction] Accuracy: {}".format(metrics.accuracy_score(gt, pred)))

    # Clean NOP node
    graph.remove_nop()
    # Apply the GCN to correct nodes
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Iterative Correct: repeatedly flag implausible nodes and demote each
    # to its next top-k candidate until the GCN reports no wrong nodes.
    for i in range(args.iter_correct_num):
        if args.is_correct == 0:
            break
        wrong_node_name_list = Parser.check_pred_by_gcn(graph, device)
        if len(wrong_node_name_list) == 0:
            break
        else:
            logger.debug("Found {} wrong nodes!".format(len(wrong_node_name_list)))
        for node_name in wrong_node_name_list:
            tmp_node = graph.find_node_by_name(node_name)
            # NOTE(review): assumes the current atomic_type is present in the
            # top-k list and is not its last entry — otherwise this raises
            # ValueError/IndexError; confirm top-k always has a next candidate.
            origin_index = tmp_node.function_param["topk"].index(tmp_node.atomic_type)
            logger.debug("Correct node [{}]: {} -> {}".format(node_name, tmp_node.atomic_type,
                                                              tmp_node.function_param["topk"][origin_index + 1]))
            tmp_node.atomic_type = tmp_node.function_param["topk"][origin_index + 1]

        # Recompute predictions over the same node filter used for gt above.
        pred = []
        for node in graph.node_set:
            if node.unknown or node.is_extra or "nop" in node.name:
                continue
            pred.append(atom_list.index(node.atomic_type))

        logger.info("[After GCN Correction (epoch: {})] Accuracy: {}".format(i, metrics.accuracy_score(gt, pred)))

    # Correct nodes with hand-written rules, in topological order, only when
    # GCN correction was enabled and ran for more than one iteration.
    isWrongCheck = True if args.is_correct == 1 and args.iter_correct_num > 1 else False
    if isWrongCheck:
        pred = []
        nodelist = list(nx.topological_sort(graph.nx_graph))
        for idx in nodelist:
            node = graph.find_node_by_name(graph.nx_graph.nodes[idx]["name"])
            if node.unknown or node.is_extra or "nop" in node.name:
                continue
            node.has_wrong_pred = False
            node.wrong_pred_category = -1
            Parser.check_wrong_pred(node)
            # Keep correcting until the rule check passes for this node.
            while node.has_wrong_pred:
                if node.has_wrong_pred:
                    logger.debug("Final Correct {} [before]: [{}]".format(node.name, node.atomic_type))
                    node = ValidateUtil.correct_wrong_node(node)
                    logger.debug("Final Correct {} [after]: [{}]".format(node.name, node.atomic_type))
                    node.has_wrong_pred = False
                    node.wrong_pred_category = -1
                    Parser.check_wrong_pred(node)

        for node in graph.node_set:
            if node.unknown or node.is_extra or "nop" in node.name:
                continue
            pred.append(atom_list.index(node.atomic_type))
        # Log remaining misclassifications for debugging.
        for i in range(len(gt)):
            if gt[i] != pred[i]:
                logger.info("Fail Case: GT[{}], Pred[{}]".format(gt[i], pred[i]))
        logger.info("[After Final GCN Correction] Accuracy: {} [Total:{}]".format(metrics.accuracy_score(gt, pred),
                                                                                  len(gt)))

    # Convert the atomic function to framework function
    graph = Parser.convert_to_framework(graph, framework="keras")

    for node in graph.node_set:
        # Add validation of these parameters
        node = ValidateUtil.validateNode(node)
    # Emit the recovered Keras model source code.
    util = RecoverUtilKeras(graph=graph)
    util.generate_init_codes()
    util.generate_forward_codes()
    logger.info("\n" + util.output_model_code())
    with open("test_model.py", "w") as f:
        f.write(util.output_model_code())


if __name__ == '__main__':
    # Command-line entry point for the TVM model reversal pipeline.
    cli = argparse.ArgumentParser(description='TVM Reversion')
    cli.add_argument(
        "--json",
        type=str,
        default="./data/module.json",
        help="The path to the json file of the computation graph.",
    )
    cli.add_argument(
        "--so",
        type=str,
        default="./data/model.so",
        help="The path to the .so file of the model.",
    )
    cli.add_argument("--gpu", type=str, default="0")
    cli.add_argument("--is_correct", type=int, default=0)
    cli.add_argument("--iter_correct_num", type=int, default=1)
    cli.add_argument("--log_level", type=str, default="INFO")
    args = cli.parse_args()

    # Restrict visible GPUs before any CUDA initialization happens.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Replace loguru's default sink with one honoring the requested level.
    logger.remove()
    logger.add(stderr, level=args.log_level)
    run(args)
