import argparse
from collections import defaultdict
from model.gnn import GCNCLSNet
from data.tvm_dataset import TVMDataSetGNN
import torch
import datetime
import os
from torch.utils.data import DataLoader
import tqdm
from loguru import logger
import sys
from my_utils.log_util import LogUtil
import pickle

from my_utils.parsing_util import SLOT_LENGTH, SLOT_COUNT, decode

# Operator-name vocabulary for the function-type slot of a prediction vector.
# `decode(vec[:SLOT_LENGTH], label_list)` maps a slot back to one of these
# names by index, so the ORDER of this list is part of the label encoding and
# must match the ordering used when the training data was generated — do not
# reorder or insert entries.
label_list = [
    'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul', 'global_max_pool2d', 'subtract',
    'minimum', 'concatenate', 'add', 'relu', 'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
    'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'dense',
    'transpose', 'sqrt', 'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d', 'softmax',
    'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d'
]


def _attr_slots(functype):
    """Return the (attribute, position) pairs encoded in the attribute slots
    for *functype*, in slot order.

    Slot i of the pair list corresponds to prediction/target vector range
    ``[(i+1)*SLOT_LENGTH : (i+2)*SLOT_LENGTH]`` (slot 0 is the function type
    itself).  Function types with no extra attributes return an empty list.
    """
    if functype == "conv2d":
        return [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1), ('kernel_size', 0),
                ('depth_multiplier', 0)]
    if functype == "conv3d":
        return [('padding', 0), ('filters', 0), ('strides', 0), ('strides', 1), ('strides', 2),
                ('kernel_size', 0)]
    if functype == "upsampling":
        return [('size', 0), ('interpolation', 0)]
    if functype == "upsampling3d":
        return [('size', 0)]
    if functype == "pad":
        return [('padding', 0)]
    if functype in ("max_pool2d", "max_pool3d", "avg_pool2d", "avg_pool3d"):
        return [('pool_size', 0), ('padding', 0)]
    if functype == "strided_slice":
        return [('cropping', 0), ('cropping', 1), ('cropping', 2), ('cropping', 3)]
    return []


def _evaluate(net, dataloader, device, total_items, failure_cases=None):
    """Run one full evaluation pass over *dataloader* and log per-key accuracy.

    Accuracy is tracked both for the function type itself and, when the type
    was predicted correctly, for each of its attribute slots
    (key ``"<type>:<attr>[<pos>]"``).

    :param net:           model already placed on *device* and in eval mode.
    :param dataloader:    yields (input, target, shape_info, value_data, edge_data).
    :param device:        torch device for target/attr comparisons.
    :param total_items:   dataset length, used only to size the progress bar.
    :param failure_cases: optional nested dict; when given, mis-predicted
                          types are recorded as failure_cases[true][predicted].
    """
    total_dict = defaultdict(int)
    succ_dict = defaultdict(int)
    pbar = tqdm.tqdm(total=total_items)
    # Gradients are never needed during evaluation; no_grad saves memory.
    with torch.no_grad():
        for input, target, shape_info, value_data, edge_data in dataloader:
            pbar.update(1)

            target = target[0]
            true_functype = decode(target[:SLOT_LENGTH], label_list)

            pred = net(input, edge_data, device, shape_info=shape_info, value_data=value_data)
            if pred is None:
                continue
            if all(torch.isnan(pred[0])):
                continue
            # Binarize the raw regression output into 0/1 slot vectors.
            pred = pred.round().clip(0, 1)[0]

            functype = decode(pred[:SLOT_LENGTH], label_list)

            is_wrong_type = functype != true_functype
            if not is_wrong_type:
                succ_dict[functype] += 1
            elif failure_cases is not None:
                # BUGFIX: previously this was incremented in the *success*
                # branch, so the dumped "failure cases" recorded successes.
                # Record the confusion only on a mis-predicted type.
                failure_cases[true_functype][functype] += 1

            total_dict[true_functype] += 1

            # Per-attribute accuracy; only meaningful when the type matched.
            # NOTE: loop variable is deliberately not named `i` so it cannot
            # shadow an enclosing epoch counter.
            for slot_idx, (attr, pos) in enumerate(_attr_slots(true_functype)):
                key = "{}:{}[{}]".format(true_functype, attr, pos)
                total_dict[key] += 1
                if is_wrong_type:
                    continue
                pred_attr_vector = pred[(slot_idx + 1) * SLOT_LENGTH:(slot_idx + 2) * SLOT_LENGTH]
                target_attr_vector = target[(slot_idx + 1) * SLOT_LENGTH:(slot_idx + 2) * SLOT_LENGTH].to(device)
                if all(pred_attr_vector == target_attr_vector):
                    succ_dict[key] += 1
    pbar.close()

    for k in total_dict:
        logger.info("Accuracy [{}]: {}".format(k, succ_dict[k] / total_dict[k]))


def run(args):
    """Train (or, with --eval 1, only evaluate) the GCN classifier.

    Side effects: creates ./results, writes checkpoints under
    results/<timestamp>-(<script>)/checkpoints/, logs accuracy via loguru,
    and in eval-only mode dumps failure_cases.pkl and exits the process.
    """
    # Create the results dir
    if not os.path.exists("./results"):
        os.mkdir("./results")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    LogUtil.create_dir(args, current_time, os.path.basename(sys.argv[0]))

    # Prepare Data.  In eval-only mode almost all data goes to the test split.
    train_dataset = TVMDataSetGNN(args.data,
                                  train_ratio=args.train_ratio if args.eval == 0 else 0.001,
                                  w2v_model=args.w2v_model,
                                  config_path=args.config_path,
                                  shape_info=(args.shape_info == 1),
                                  value_trace=True,
                                  weight_info=(args.weight_info == 1))
    test_dataset = TVMDataSetGNN(args.data,
                                 train_ratio=args.train_ratio,
                                 mode='test',
                                 w2v_model=args.w2v_model,
                                 config_path=args.config_path,
                                 shape_info=(args.shape_info == 1),
                                 value_trace=True,
                                 weight_info=(args.weight_info == 1))

    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, pin_memory=False, num_workers=1)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True, pin_memory=False, num_workers=1)

    # Model Init: output is one slot per function type plus attribute slots.
    net = GCNCLSNet(in_d=200,
                    hidden_channels=args.hsize,
                    num_layers=args.num_layers,
                    num_layers_gcn=args.num_layers_gcn,
                    has_shape=(args.shape_info == 1),
                    has_value=True,
                    has_weight_info=(args.weight_info == 1),
                    num_classes=SLOT_LENGTH * SLOT_COUNT)

    if args.ckpt != "None":
        net.load_state_dict(torch.load(args.ckpt))
    net.to(device)

    # Init optimizer and loss (Adam by default, SGD on request).
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(net.parameters(), lr=args.lr)

    criterion = torch.nn.MSELoss()

    # Eval-only mode: evaluate once, dump the confusion of failures, and exit.
    if args.eval == 1:
        logger.info("Start evaluation ...")
        net.eval()
        failure_cases = defaultdict(lambda: defaultdict(int))
        _evaluate(net, test_dataloader, device, len(test_dataset), failure_cases=failure_cases)
        with open("failure_cases.pkl", "wb") as f:
            pickle.dump(failure_cases, f)
        exit(0)

    # Training
    for epoch in range(args.epochs):
        pbar = tqdm.tqdm(total=len(train_dataset))
        pbar.set_description("Epoch:{}, Loss:{}".format(epoch + 1, 0))
        count = 0
        sum_loss = 0
        net.train()
        for input, target, shape_info, value_data, edge_data in train_dataloader:
            pbar.update(1)
            if len(input) == 0:
                continue
            pred = net(input, edge_data, device, shape_info=shape_info, value_data=value_data)
            # BUGFIX: the eval loops skip a None prediction but the training
            # loop previously passed it straight into the loss and crashed.
            if pred is None:
                continue
            optimizer.zero_grad()

            loss = criterion(pred, target.to(device))
            if torch.isnan(loss):
                continue
            loss.backward()
            optimizer.step()

            sum_loss += loss.item()
            count += 1

            if count % 10 == 0:
                pbar.set_description("Epoch:{}, Loss:{}".format(epoch + 1, sum_loss / count))

            # Clean cache
            torch.cuda.empty_cache()

        pbar.close()
        # max(count, 1) guards a ZeroDivisionError when every batch was skipped.
        logger.info("Epoch:{}, Loss:{}".format(epoch + 1, sum_loss / max(count, 1)))

        # Test every `eval_per_epoch` epochs and on the final epoch.
        if epoch % args.eval_per_epoch == 0 or epoch == (args.epochs - 1):
            # Saving the checkpoint (presumably LogUtil.create_dir made this
            # directory already; makedirs is a no-op in that case).
            save_path = "results/{}-({})/checkpoints/".format(current_time, os.path.basename(sys.argv[0]))
            os.makedirs(save_path, exist_ok=True)
            torch.save(net.state_dict(), "{}/model_ckpt_{}.pth".format(save_path, epoch))
            logger.info("Start evaluation ...")

            # Close the grad computation
            net.eval()
            _evaluate(net, test_dataloader, device, len(test_dataset))

    logger.info("Finished!")


if __name__ == '__main__':
    # Command-line interface: build the parser, pin the visible GPU, then
    # hand the parsed namespace to run().
    arg_parser = argparse.ArgumentParser(description='TVM Reversion')
    # Optimization schedule.
    arg_parser.add_argument("--epochs", default=200, type=int, help="number of epochs")
    arg_parser.add_argument("--lr", default=1e-4, type=float, help="Learning rate")
    # Model architecture.
    arg_parser.add_argument("--hsize", default=200, type=int, help="hidden size of lstm")
    arg_parser.add_argument("--num_layers", default=2, type=int, help="layers of lstm")
    arg_parser.add_argument("--num_layers_gcn", default=3, type=int, help="layers of gcn2conv")
    # Evaluation / checkpointing.
    arg_parser.add_argument("--eval_per_epoch", default=2, type=int, help="Do evaluation at every n epochs")
    arg_parser.add_argument("--ckpt", default="None", type=str, help="Checkpoint file")
    arg_parser.add_argument("--optim", default="adam", type=str, help="Optimizer: [adam, sgd]")
    arg_parser.add_argument("--gpu", default="0", type=str)
    arg_parser.add_argument("--eval", default=0, type=int, help="Skip the training step")
    # Data and feature switches.
    arg_parser.add_argument("--data", default="tvm_data/save_dir/trace_data_all.pkl", type=str, help="The training data")
    arg_parser.add_argument("--w2v_model", default="w2v-new.model", type=str, help="word2vec model")
    arg_parser.add_argument("--attention", default=0, type=int)
    arg_parser.add_argument("--shape_info", default=0, type=int)
    arg_parser.add_argument("--num_class", default=9, type=int)
    arg_parser.add_argument("--weight_info", default=0, type=int)
    arg_parser.add_argument("--train_ratio", default=0.7, type=float)
    arg_parser.add_argument("--config_path", default="tvm_data/other_funcs/elf/", type=str)

    cli_args = arg_parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = cli_args.gpu
    run(cli_args)
