import argparse
from model.correct_model import CorrectGCN, CorrectGNN
from data.graph_dataset import TypeGraphDataset
import torch
import datetime
import os
from torch.utils.data import DataLoader
import tqdm
from loguru import logger
import sys
from my_utils.log_util import LogUtil
# import pickle

from my_utils.eval_util import eval_correct_gnn

# Operator vocabulary for graph node labels. Order matters: everything up to
# and including 'conv2d' is a "normal" operator that may be perturbed;
# 'input' and 'p' are the two sentinel entries at the tail and are never
# used as replacement labels.
label_list = [
    'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul', 'global_max_pool2d', 'subtract',
    'minimum', 'concatenate', 'add', 'relu', 'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
    'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad', 'avg_pool3d', 'max_pool2d', 'maximum', 'dense',
    'transpose', 'sqrt', 'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d', 'softmax',
    'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d', 'input', 'p'
]


def change_nodes(labels, ratio=0.1):
    """Change some nodes' labels and generate the target vector.

    Randomly re-labels ``ratio`` of the "normal" nodes (those whose label
    index is at or below 'conv2d' in ``label_list``) with a different random
    label, and marks every changed position with 1 in a binary target vector.

    :param labels: A 1-D tensor of label indices
    :type labels: Tensor
    :param ratio: The percentage of changeable nodes, defaults to 0.1
    :type ratio: float, optional
    :return: The changed label tensor and the target vector
    :rtype: Tuple(Tensor, Tensor)
    """
    # Work on a copy so the caller's tensor is not mutated in place
    # (the original aliased `labels` and silently modified it).
    new_labels = labels.clone()
    tail_index = label_list.index('conv2d')
    # Only "normal" operator nodes (label <= 'conv2d') are eligible to change.
    changable_index_list = [i for i, x in enumerate(labels) if x <= tail_index]
    changable_node_num = int(len(changable_index_list) * ratio)
    target = torch.zeros(len(labels))
    # Sample positions WITHOUT replacement so exactly `changable_node_num`
    # distinct nodes are altered; the original draw-with-replacement could
    # pick the same node twice and change fewer nodes than requested.
    for pos in torch.randperm(len(changable_index_list))[:changable_node_num]:
        changable_index = changable_index_list[int(pos)]
        original_label = new_labels[changable_index]
        # Draw a replacement label (excluding the two sentinel entries
        # 'input' and 'p') until it differs from the original.
        changed_label = torch.randint(low=0, high=len(label_list) - 2, size=[])
        while changed_label == original_label:
            changed_label = torch.randint(low=0, high=len(label_list) - 2, size=[])
        new_labels[changable_index] = changed_label
        target[changable_index] = 1

    return new_labels, target


def run(args):
    """Train or evaluate the correction GNN on the type-graph dataset.

    All hyper-parameters come from ``args`` (see the argparse setup under
    ``__main__``). Side effects: creates ``./results`` and a per-run log /
    checkpoint directory, writes checkpoints, and appends tqdm progress to
    the run's log file. With ``args.eval == 1`` it only evaluates and exits.

    :param args: Parsed command-line arguments
    :type args: argparse.Namespace
    """
    # Create the results dir (no-op if it already exists; avoids the
    # exists-then-mkdir race of the original).
    os.makedirs("./results", exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    logfile_path = LogUtil.create_dir(args, current_time, os.path.basename(sys.argv[0]))

    # Prepare Data
    train_dataset = TypeGraphDataset(path="{}/train".format(args.data))
    test_dataset = TypeGraphDataset(path="{}/test".format(args.data))

    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, pin_memory=False, num_workers=0)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True, pin_memory=False, num_workers=0)

    # Model init: default to the GNN variant, switch to GCN when requested
    net = CorrectGNN(embed_dim=args.hsize)
    if args.model == "gcn":
        net = CorrectGCN(embed_dim=args.hsize)

    # "None" (the string) is the sentinel default for no checkpoint
    if args.ckpt != "None":
        net.load_state_dict(torch.load(args.ckpt))
    net.to(device)

    # Init optimizer (Adam by default) and loss
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(net.parameters(), lr=args.lr)

    criterion = torch.nn.CrossEntropyLoss()

    # Evaluation-only mode: skip training entirely
    if args.eval == 1:
        net.eval()
        logger.info("Start evaluation ...")
        eval_correct_gnn(net, test_dataloader, device, logfile_path, change_nodes, args.change_ratio)
        exit(0)

    # Training
    for i in range(args.epochs):
        pbar = tqdm.tqdm(total=len(train_dataset),
                         file=open(logfile_path, "a+"),
                         desc="Epoch:{}, Loss:{}".format(i + 1, 0),
                         position=0,
                         leave=True)
        count = 0
        sum_loss = 0
        net.train()
        for labels, edges in train_dataloader:

            pbar.update(1)

            label_input = torch.LongTensor(labels)
            # Randomly change some node types and generate the target vector
            label_input, target = change_nodes(label_input, args.change_ratio)
            label_input, target = label_input.to(device), target.long().to(device)
            adj_data = torch.LongTensor(edges).to(device)
            pred = net(label_input, adj_data)
            optimizer.zero_grad()
            loss = criterion(pred, target)

            # Skip (don't backprop) batches that produce a NaN loss
            if torch.isnan(loss):
                continue
            loss.backward()
            optimizer.step()

            sum_loss += loss.item()
            count += 1
            # max(count, 1) guards against division by zero when every
            # batch so far was skipped as NaN (original crashed here).
            pbar.set_description("Epoch:{}, Loss:{}".format(i + 1, sum_loss / max(count, 1)))
        pbar.close()
        logger.info("Epoch:{}, Loss:{}".format(i + 1, sum_loss / max(count, 1)))

        # Periodic test + checkpoint (always on the final epoch)
        if i % args.eval_per_epoch == 0 or i == (args.epochs - 1):
            # Create the checkpoint directory before saving — the original
            # called torch.save on a path that was never created.
            save_path = "results/{}-({})/checkpoints/".format(current_time, os.path.basename(sys.argv[0]))
            os.makedirs(save_path, exist_ok=True)
            torch.save(net.state_dict(), "{}/model_ckpt_{}.pth".format(save_path, i))
            logger.info("Start evaluation ...")
            # NOTE(review): argument order unified with the eval-only call
            # above as (net, dataloader, device, ...); the original passed
            # (net, device, dataloader) here — one of the two had to be
            # wrong. Confirm against eval_correct_gnn's signature.
            eval_correct_gnn(net, test_dataloader, device, logfile_path, change_nodes, args.change_ratio)

    logger.info("Finished!")


if __name__ == '__main__':
    # Command-line entry point: parse hyper-parameters, pin the visible GPU,
    # then hand off to run().
    parser = argparse.ArgumentParser(description='TVM Reversion')
    # Optimization hyper-parameters
    parser.add_argument("--epochs", default=200, type=int, help="number of epochs")
    parser.add_argument("--lr", default=1e-4, type=float, help="Learning rate")
    # NOTE(review): help text says "lstm" but --hsize feeds embed_dim of the
    # Correct{GNN,GCN} models in run() — confirm wording with the model code.
    parser.add_argument("--hsize", default=200, type=int, help="hidden size of lstm")
    parser.add_argument("--num_layers", default=2, type=int, help="layers of lstm")
    parser.add_argument("--eval_per_epoch", default=2, type=int, help="Do evaluation at every n epochs")
    # "None" (the string) means: do not load a checkpoint (see run()).
    parser.add_argument("--ckpt", default="None", type=str, help="Checkpoint file")
    parser.add_argument("--optim", default="adam", type=str, help="Optimizer: [adam, sgd]")
    parser.add_argument("--gpu", default="0", type=str)
    # --eval 1 runs evaluation only and exits without training.
    parser.add_argument("--eval", default=0, type=int, help="Skip the training step")
    parser.add_argument("--data", default="tvm_data/model_graph_data/", type=str, help="The dataset dir")
    parser.add_argument("-cr", "--change_ratio", default=0.1, type=float, help="The percentage of changed nodes")
    # NOTE(review): the flags below are not read by run() in this file;
    # presumably consumed elsewhere (dataset/eval utilities) — verify.
    parser.add_argument("--shape_info", default=0, type=int)
    parser.add_argument("--num_class", default=9, type=int)
    parser.add_argument("--weight_info", default=0, type=int)
    parser.add_argument("--value_trace", default=1, type=int)
    parser.add_argument("--train_ratio", default=0.7, type=float)
    parser.add_argument("--config_path", default="tvm_data/other_funcs/elf/", type=str)
    # "v3" selects the default CorrectGNN; "gcn" selects CorrectGCN in run().
    parser.add_argument("--model", default="v3", type=str)
    parser.add_argument("--log_level", default="INFO", type=str)

    args = parser.parse_args()
    # Restrict which GPUs CUDA may see; effective because no CUDA context
    # has been created yet at this point.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    run(args)
