import torch
# import numpy as np
from sklearn import metrics
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from loguru import logger
from collections import defaultdict
from my_utils.parsing_util import SLOT_LENGTH, atom_list
import pickle as pk
from sklearn.metrics import classification_report


def eval_attr(net, test_dataloader: DataLoader, device):
    """Evaluate per-attribute prediction accuracy, conditioning the net on
    the ground-truth operator type (passed as a one-hot ``type_vector``).

    Per-attribute accuracies are written to ``logger`` at the end.

    Args:
        net: model called as ``net(input, device, type_vector,
            shape_info=..., value_data=...)``; may return ``None`` to
            skip a sample.
        test_dataloader: yields ``(input, target, shape_info, value_data)``;
            ``target[0]`` is laid out as ``[type_index, slot_0 ... slot_k]``
            where each slot is ``SLOT_LENGTH`` wide.
        device: torch device the target slots are moved to for comparison.
    """
    # (attribute, position) slots each operator type predicts; the order
    # must match the slot layout of both `pred` and `target`.
    attr_slots = {
        "conv2d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1),
                   ('kernel_size', 0), ('depth_multiplier', 0)],
        "conv3d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1),
                   ('strides', 2), ('kernel_size', 0)],
        "upsampling": [('size', 0), ('interpolation', 0)],
        "upsampling3d": [('size', 0)],
        "pad": [('padding', 0)],
        "max_pool2d": [('pool_size', 0), ('padding', 0)],
        "max_pool3d": [('pool_size', 0), ('padding', 0)],
        "avg_pool2d": [('pool_size', 0), ('padding', 0)],
        "avg_pool3d": [('pool_size', 0), ('padding', 0)],
        "strided_slice": [('cropping', 0), ('cropping', 1), ('cropping', 2), ('cropping', 3)],
    }

    net.eval()
    total_dict = defaultdict(int)
    succ_dict = defaultdict(int)
    pbar = tqdm(total=len(test_dataloader.dataset))
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for inputs, target, shape_info, value_data in test_dataloader:
            pbar.update(1)
            target = target[0]
            true_functype = atom_list[int(target[0])]
            type_vector = torch.nn.functional.one_hot(target[0].long(), num_classes=len(atom_list))
            pred = net(inputs, device, type_vector, shape_info=shape_info, value_data=value_data)
            if pred is None:  # the net may refuse a sample
                continue
            # Binarize the predicted slot vector once (round + clip to {0, 1}).
            pred = pred[0].round().clip(0, 1)

            for i, (attr, pos) in enumerate(attr_slots.get(true_functype, [])):
                key = "{}:{}[{}]".format(true_functype, attr, pos)
                total_dict[key] += 1
                pred_attr_vector = pred[i * SLOT_LENGTH:(i + 1) * SLOT_LENGTH]
                # target layout: [type_index, slot_0, slot_1, ...]
                target_attr_vector = target[1 + i * SLOT_LENGTH:1 + (i + 1) * SLOT_LENGTH].to(device)
                if all(pred_attr_vector == target_attr_vector):
                    succ_dict[key] += 1

    pbar.close()
    for k in total_dict:
        logger.info("Accuracy [{}]: {}".format(k, succ_dict[k] / total_dict[k]))


def eval_class_and_attr(net, test_dataloader, device, args, logfile_path):
    """Jointly evaluate operator-type classification and attribute prediction.

    Type accuracy is tracked per operator; attribute accuracy is only
    credited when the type itself was predicted correctly (a wrong type
    still counts the sample in the attribute totals, never in the
    successes). Ground-truth and predicted type indices are dumped to
    ``eval_results.pkl`` in the working directory.

    Args:
        net: model called as ``net(input, device, shape_info=...,
            value_data=...)``; may return ``None`` to skip a sample.
        test_dataloader: yields ``(input, target, shape_info, value_data)``.
        device: torch device the target attribute slots are moved to.
        args: expects ``args.value_trace``; 0 disables the value-trace input.
        logfile_path: file the tqdm progress bar is appended to.
    """
    # (attribute, position) slots per operator type; order must match the
    # slot layout of both `pred` and `target`.
    attr_slots = {
        "conv2d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1),
                   ('kernel_size', 0), ('depth_multiplier', 0)],
        "conv3d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1),
                   ('strides', 2), ('kernel_size', 0)],
        "upsampling": [('size', 0), ('interpolation', 0)],
        "upsampling3d": [('size', 0)],
        "pad": [('padding', 0)],
        "max_pool2d": [('pool_size', 0), ('padding', 0)],
        "max_pool3d": [('pool_size', 0), ('padding', 0)],
        "avg_pool2d": [('pool_size', 0), ('padding', 0)],
        "avg_pool3d": [('pool_size', 0), ('padding', 0)],
        "strided_slice": [('cropping', 0), ('cropping', 1), ('cropping', 2), ('cropping', 3)],
    }

    net.eval()
    total_dict = defaultdict(int)
    succ_dict = defaultdict(int)
    gts, preds = [], []
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for inputs, target, shape_info, value_data in test_dataloader:
                if args.value_trace == 0:
                    value_data = None
                pbar.update(1)
                target = target[0]
                true_functype = atom_list[int(target[0])]

                pred = net(inputs, device, shape_info=shape_info, value_data=value_data)
                if pred is None:  # the net may refuse a sample
                    continue
                pred = pred[0]
                # The first len(atom_list) entries of `pred` are the type scores.
                pred_type_idx = int(pred[:len(atom_list)].topk(1)[1][0])
                functype = atom_list[pred_type_idx]

                gts.append(int(target[0]))
                preds.append(pred_type_idx)

                is_wrong_type = functype != true_functype
                if not is_wrong_type:
                    succ_dict[functype] += 1
                total_dict[true_functype] += 1

                # Binarize the attribute portion (round + clip to {0, 1}).
                pred = pred.round().clip(0, 1)
                for i, (attr, pos) in enumerate(attr_slots.get(true_functype, [])):
                    key = "{}:{}[{}]".format(true_functype, attr, pos)
                    total_dict[key] += 1
                    if is_wrong_type:
                        continue  # attribute credit requires a correct type
                    # pred carries the type scores first, target does not.
                    pred_attr_vector = pred[len(atom_list) + i * SLOT_LENGTH:
                                            len(atom_list) + (i + 1) * SLOT_LENGTH]
                    target_attr_vector = target[1 + i * SLOT_LENGTH:
                                                1 + (i + 1) * SLOT_LENGTH].to(device)
                    if all(pred_attr_vector == target_attr_vector):
                        succ_dict[key] += 1
        pbar.close()

    for k in total_dict:
        logger.info("Accuracy [{}]: {}".format(k, succ_dict[k] / total_dict[k]))

    logger.info("Saving results ...")
    with open("eval_results.pkl", "wb") as f:
        pk.dump({'gt': gts, 'pred': preds}, f)
    logger.info("Finished!")


def eval_class(net, test_dataloader, device, args, logfile_path):
    """Evaluate operator-type classification accuracy only.

    Per-type accuracies are logged, and the ground-truth / predicted type
    indices are dumped to ``eval_results.pkl`` in the working directory.

    Args:
        net: model called as ``net(input, device)``; may return ``None``
            to skip a sample.
        test_dataloader: yields ``(input, target, shape_info, value_data)``;
            only ``target[0][0]`` (the type index) is used.
        device: torch device forwarded to the net.
        args: unused here; kept for signature parity with the other
            eval entry points.
        logfile_path: file the tqdm progress bar is appended to.
    """
    net.eval()
    total_dict = defaultdict(int)
    succ_dict = defaultdict(int)
    gts, preds = [], []
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for inputs, target, shape_info, value_data in test_dataloader:
                pbar.update(1)
                target = target[0]
                true_functype = atom_list[int(target[0])]

                pred = net(inputs, device)
                if pred is None:  # the net may refuse a sample
                    continue
                # The first len(atom_list) entries are the type scores.
                pred_type_idx = int(pred[0][:len(atom_list)].topk(1)[1][0])
                functype = atom_list[pred_type_idx]

                gts.append(int(target[0]))
                preds.append(pred_type_idx)

                if functype == true_functype:
                    succ_dict[functype] += 1
                total_dict[true_functype] += 1
        pbar.close()

    for k in total_dict:
        logger.info("Accuracy [{}]: {}".format(k, succ_dict[k] / total_dict[k]))

    logger.info("Saving results ...")
    with open("eval_results.pkl", "wb") as f:
        pk.dump({'gt': gts, 'pred': preds}, f)
    logger.info("Finished!")


def eval_attr_v3(net, test_dataloader, device, args, logfile_path):
    """Evaluate a single-attribute classifier: the net's top-1 index is
    compared directly against the (scalar) target.

    Overall accuracy and a sklearn classification report are logged;
    ground-truth / predicted indices are dumped to ``eval_results.pkl``.

    Args:
        net: model called as ``net(inp, device)``; may return ``None``
            to skip a sample.
        test_dataloader: yields ``(inp, target, shape_info, value_data)``;
            only ``target[0]`` is used.
        device: torch device the target is moved to.
        args: unused here; kept for signature parity with the other
            eval entry points.
        logfile_path: file the tqdm progress bar is appended to.
    """
    net.eval()
    total = 0
    succ = 0
    gts, preds = [], []
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for inp, target, shape_info, value_data in test_dataloader:
                pbar.update(1)
                target = target[0].to(device)
                pred = net(inp, device)
                if pred is None:  # the net may refuse a sample
                    continue
                attr_pred = pred[0].topk(1)[1][0]
                logger.debug("Pred:{}, Target: {}".format(attr_pred, target))

                gts.append(int(target[0]))
                preds.append(int(attr_pred))

                if attr_pred == target:
                    succ += 1
                total += 1
        pbar.close()

    if total == 0:
        # Every sample was skipped — avoid the former ZeroDivisionError.
        logger.warning("No samples evaluated; skipping accuracy report.")
        return
    logger.info("Accuracy: {}".format(succ / total))
    logger.info("\n" + classification_report(gts, preds))
    logger.info("Saving results ...")
    with open("eval_results.pkl", "wb") as f:
        pk.dump({'gt': gts, 'pred': preds}, f)
    logger.info("Finished!")


def eval_attr_v2(net, device, test_dataloader, logfile_path):
    """Evaluate per-attribute accuracy where each SLOT_LENGTH-wide slot is
    treated as a classification head (argmax per slot) and the target
    stores the class index per attribute directly.

    Conditions the net on the ground-truth operator type (one-hot
    ``type_vector``) and logs per-attribute accuracies at the end.

    Args:
        net: model called as ``net(input, device, type_vector,
            shape_info=..., value_data=...)``; may return ``None`` to
            skip a sample.
        device: torch device the target attribute is moved to.
        test_dataloader: yields ``(input, target, shape_info, value_data)``;
            ``target[0]`` is ``[type_index, attr_0_idx, attr_1_idx, ...]``.
        logfile_path: file the tqdm progress bar is appended to.
    """
    # (attribute, position) slots per operator type; order must match the
    # slot layout of `pred` and the index layout of `target`.
    attr_slots = {
        "conv2d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1),
                   ('kernel_size', 0), ('depth_multiplier', 0)],
        "conv3d": [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1),
                   ('strides', 2), ('kernel_size', 0)],
        "upsampling": [('size', 0), ('interpolation', 0)],
        "upsampling3d": [('size', 0)],
        "pad": [('padding', 0)],
        "max_pool2d": [('pool_size', 0), ('padding', 0)],
        "max_pool3d": [('pool_size', 0), ('padding', 0)],
        "avg_pool2d": [('pool_size', 0), ('padding', 0)],
        "avg_pool3d": [('pool_size', 0), ('padding', 0)],
        "strided_slice": [('cropping', 0), ('cropping', 1), ('cropping', 2), ('cropping', 3)],
    }

    net.eval()
    total_dict = defaultdict(int)
    succ_dict = defaultdict(int)
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for inputs, target, shape_info, value_data in test_dataloader:
                pbar.update(1)
                target = target[0]
                true_functype = atom_list[int(target[0])]
                type_vector = torch.nn.functional.one_hot(target[0].long(), num_classes=len(atom_list))
                pred = net(inputs, device, type_vector, shape_info=shape_info, value_data=value_data)
                if pred is None:  # the net may refuse a sample
                    continue
                pred = pred[0]

                for i, (attr, pos) in enumerate(attr_slots.get(true_functype, [])):
                    key = "{}:{}[{}]".format(true_functype, attr, pos)
                    total_dict[key] += 1
                    # Argmax within the slot vs. the stored class index.
                    pred_attr = pred[i * SLOT_LENGTH:(i + 1) * SLOT_LENGTH].topk(1)[1][0].int()
                    target_attr = target[i + 1].int().to(device)
                    if target_attr == pred_attr:
                        succ_dict[key] += 1
        pbar.close()

    for k in total_dict:
        logger.info("Accuracy [{}]: {}".format(k, succ_dict[k] / total_dict[k]))


def eval_correct_gnn(net, device, test_dataloader, logfile_path, change_nodes, change_ratio=0.1):
    """Evaluate a GNN that flags corrupted nodes: ``change_nodes`` perturbs a
    fraction of node labels and the net predicts, per node, whether it was
    changed. Logs accuracy / recall / precision / F1 (binary, via sklearn).

    Args:
        net: model called as ``net(label_input, adj_data)``.
        device: torch device inputs are moved to.
        test_dataloader: yields ``(labels, edges)`` per graph.
        logfile_path: file the tqdm progress bar is appended to.
        change_nodes: callable ``(label_tensor, ratio) -> (perturbed, target)``.
        change_ratio: fraction of nodes to perturb (default 0.1).
    """
    pred_arr = []
    gt_arr = []
    net.eval()
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for labels, edges in test_dataloader:
                pbar.update(1)
                label_input = torch.LongTensor(labels)
                label_input, target = change_nodes(label_input, change_ratio)
                label_input, target = label_input.to(device), target.long().to(device)
                adj_data = torch.LongTensor(edges).to(device)
                pred = net(label_input, adj_data)
                pred_arr.extend(pred.topk(1)[1].reshape(-1).cpu().numpy())
                gt_arr.extend(target.cpu().numpy())
        pbar.close()

    logger.info("Accuracy: {}".format(metrics.accuracy_score(gt_arr, pred_arr)))
    logger.info("Recall: {}".format(metrics.recall_score(gt_arr, pred_arr)))
    logger.info("Precision: {}".format(metrics.precision_score(gt_arr, pred_arr)))
    logger.info("F1-Score: {}".format(metrics.f1_score(gt_arr, pred_arr)))


def eval_correct_gnn_with_shape(net, device, test_dataloader, logfile_path, change_nodes, change_ratio=0.1):
    """Same as :func:`eval_correct_gnn`, but the net additionally receives
    per-node weight-dimension information. Logs accuracy / recall /
    precision / F1 (binary, via sklearn).

    Args:
        net: model called as ``net(label_input, adj_data, weight_dim_input)``.
        device: torch device inputs are moved to.
        test_dataloader: yields ``(labels, edges, weight_dim)`` per graph.
        logfile_path: file the tqdm progress bar is appended to.
        change_nodes: callable ``(label_tensor, ratio) -> (perturbed, target)``.
        change_ratio: fraction of nodes to perturb (default 0.1).
    """
    pred_arr = []
    gt_arr = []
    net.eval()
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for labels, edges, weight_dim in test_dataloader:
                pbar.update(1)
                label_input = torch.LongTensor(labels)
                weight_dim_input = torch.LongTensor(weight_dim).to(device)
                label_input, target = change_nodes(label_input, change_ratio)
                label_input, target = label_input.to(device), target.long().to(device)
                adj_data = torch.LongTensor(edges).to(device)
                pred = net(label_input, adj_data, weight_dim_input)
                pred_arr.extend(pred.topk(1)[1].reshape(-1).cpu().numpy())
                gt_arr.extend(target.cpu().numpy())
        pbar.close()

    logger.info("Accuracy: {}".format(metrics.accuracy_score(gt_arr, pred_arr)))
    logger.info("Recall: {}".format(metrics.recall_score(gt_arr, pred_arr)))
    logger.info("Precision: {}".format(metrics.precision_score(gt_arr, pred_arr)))
    logger.info("F1-Score: {}".format(metrics.f1_score(gt_arr, pred_arr)))


def eval_correct_gnn_with_bb(net, device, test_dataloader, logfile_path, change_nodes, change_ratio=0.1):
    """Same as :func:`eval_correct_gnn`, but the net additionally receives a
    basic-block graph ``(x_bb, adj_bb, intervals)`` per sample. Logs
    accuracy / recall / precision / F1 (binary, via sklearn).

    Args:
        net: model called as
            ``net(label_input, adj_data, x_bb, adj_bb, intervals)``.
        device: torch device inputs are moved to.
        test_dataloader: yields ``((labels, edges), (x_bb, adj_bb, intervals))``.
        logfile_path: file the tqdm progress bar is appended to.
        change_nodes: callable ``(label_tensor, ratio) -> (perturbed, target)``.
        change_ratio: fraction of nodes to perturb (default 0.1).
    """
    pred_arr = []
    gt_arr = []
    net.eval()
    # Own the log handle so it is closed (it was previously leaked).
    with open(logfile_path, "a+") as log_file:
        pbar = tqdm(total=len(test_dataloader.dataset), file=log_file)
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            for (labels, edges), (x_bb, adj_bb, intervals) in test_dataloader:
                pbar.update(1)
                logger.debug("Basic Block Length: {}".format(len(x_bb)))
                label_input = torch.LongTensor(labels)
                # Randomly change some node types and build the target vector.
                label_input, target = change_nodes(label_input, change_ratio)
                label_input, target = label_input.to(device), target.long().to(device)
                adj_data = torch.LongTensor(edges).to(device)
                pred = net(label_input, adj_data, x_bb, adj_bb.to(device), intervals)
                pred_arr.extend(pred.topk(1)[1].reshape(-1).cpu().numpy())
                gt_arr.extend(target.cpu().numpy())
        pbar.close()

    logger.info("Accuracy: {}".format(metrics.accuracy_score(gt_arr, pred_arr)))
    logger.info("Recall: {}".format(metrics.recall_score(gt_arr, pred_arr)))
    logger.info("Precision: {}".format(metrics.precision_score(gt_arr, pred_arr)))
    logger.info("F1-Score: {}".format(metrics.f1_score(gt_arr, pred_arr)))