import argparse
from model.net import CLSNet, CLSNet_Attention
from data.tvm_dataset import TVMAttrDataSet
import torch
import datetime
import os
from torch.utils.data import DataLoader
import tqdm
from loguru import logger
import sys
from utils.log_util import LogUtil
import losses as losses

from utils.eval_util import eval_metrics_one_dataset

# Operator-class label set (9 layer types the classifier distinguishes).
# NOTE(review): not referenced elsewhere in this file — presumably imported
# by sibling scripts; kept for backward compatibility.
label_list = [
    'LSTM',
    'UpSampling2D',
    'UpSampling3D',
    'Cropping2D',
    'ZeroPadding2D',
    'ZeroPadding3D',
    'SeparableConv2D',
    'Conv2D',
    'DepthwiseConv2D',
]


def _train_one_epoch(net, train_dataloader, dataset_len, criterion, optimizer,
                     device, batch_size, epoch):
    """Run one training epoch and return the mean loss over optimizer steps.

    Each dataloader item is a single sample (batch_size=1 at the loader
    level); gradients are accumulated manually: one optimizer step per
    `batch_size` collected samples. Any trailing partial batch is dropped
    (drop_last semantics), matching the original behavior.
    """
    net.train()
    pbar = tqdm.tqdm(total=dataset_len)
    pbar.set_description("Epoch:{}, Loss:{}".format(epoch + 1, 0))
    step_count = 0
    sum_loss = 0
    batch_preds = []
    batch_targets = []
    for input, target in train_dataloader:
        pbar.update(1)
        # Samples that failed preprocessing come back empty; skip them.
        if len(input) == 0:
            continue

        pred = net(input, device)
        batch_preds.append(pred.squeeze(0))
        batch_targets.append(target[0].to(device))
        if len(batch_preds) == batch_size:
            optimizer.zero_grad()
            loss = criterion(torch.stack(batch_preds), torch.stack(batch_targets))
            loss.backward()
            optimizer.step()
            sum_loss += loss.item()

            step_count += 1
            if step_count % 10 == 0:
                pbar.set_description("Epoch:{}, Loss:{}".format(epoch + 1, sum_loss / step_count))
            batch_preds, batch_targets = [], []

    pbar.close()
    # Guard: with fewer than `batch_size` usable samples no step ever ran;
    # the original `sum_loss / count` raised ZeroDivisionError here.
    return sum_loss / step_count if step_count else 0.0


def _evaluate(net, test_dataloader, dataset_len, device):
    """Embed the test set; return (embeddings, labels) as lists of numpy arrays."""
    net.eval()
    embeddings = []
    labels = []
    pbar = tqdm.tqdm(total=dataset_len)
    # torch.no_grad(): inference only — the original built autograd graphs
    # for every test sample, wasting memory.
    with torch.no_grad():
        for input, target in test_dataloader:
            pbar.update(1)
            # Same empty-sample guard as training (the original eval loop
            # omitted it and would feed empty inputs to the network).
            if len(input) == 0:
                continue
            pred = net(input, device)
            embeddings.append(pred.squeeze(0).cpu().numpy())
            labels.append(target[0].numpy())
    pbar.close()
    return embeddings, labels


def run(args):
    """Train a CLSNet (optionally attention-based) on TVM attribute traces
    and periodically evaluate retrieval metrics on the held-out split.

    Side effects: creates ./results/<timestamp>-(<script>)/checkpoints/,
    saves model checkpoints there, and logs progress via loguru.
    """
    # Create the results dir (race-free, idempotent).
    os.makedirs("./results", exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    LogUtil.create_dir(args, current_time, os.path.basename(sys.argv[0]))

    # Prepare Data — 70/30 train/test split handled inside the dataset.
    train_dataset = TVMAttrDataSet(args.data,
                                   train_ratio=0.7,
                                   w2v_model=args.w2v_model,
                                   type=args.type,
                                   attr_name=args.attr_name,
                                   attr_idx=args.attr_idx,
                                   config_path=args.config_path)
    test_dataset = TVMAttrDataSet(args.data,
                                  train_ratio=0.7,
                                  mode='test',
                                  w2v_model=args.w2v_model,
                                  type=args.type,
                                  attr_name=args.attr_name,
                                  attr_idx=args.attr_idx,
                                  config_path=args.config_path)
    # batch_size=1 at the loader level: batching is done manually in the
    # train loop because samples are variable-length.
    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, pin_memory=True)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True, pin_memory=True)

    # Model Init
    net = CLSNet(in_d=200, out_classes=args.embed_dim, hiden=args.hsize, num_layers=args.num_layers)
    if args.attention == 1:
        net = CLSNet_Attention(in_d=200, out_classes=args.embed_dim, hiden=args.hsize, num_layers=args.num_layers)
    if args.ckpt != "None":
        net.load_state_dict(torch.load(args.ckpt))
    net.to(device)

    # Loss: plain cross-entropy unless Proxy Anchor Loss ("pal") is selected,
    # in which case loss_select also appends the proxy parameters to to_optim.
    criterion = torch.nn.CrossEntropyLoss()
    to_optim = [{'params': net.parameters(), 'lr': args.lr, 'weight_decay': args.decay}]
    if args.loss_method == "pal":
        criterion, to_optim = losses.loss_select(args.loss_method, args, to_optim)

    # Init Optimizer
    optimizer = torch.optim.Adam(to_optim)
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(to_optim)

    # Training
    for i in range(args.epochs):
        epoch_loss = _train_one_epoch(net, train_dataloader, len(train_dataset),
                                      criterion, optimizer, device,
                                      args.batch_size, i)
        logger.info("Epoch:{}, Loss:{}".format(i + 1, epoch_loss))

        # Test every eval_per_epoch epochs and on the final epoch.
        if i % args.eval_per_epoch == 0 or i == (args.epochs - 1):
            save_path = "results/{}-({})/checkpoints/".format(current_time, os.path.basename(sys.argv[0]))
            # Ensure the checkpoint dir exists (presumably LogUtil.create_dir
            # makes it, but torch.save would crash if it didn't).
            os.makedirs(save_path, exist_ok=True)
            torch.save(net.state_dict(), "{}/model_ckpt_{}.pth".format(save_path, i))
            logger.info("Start evaluation ...")
            batch_input, batch_target = _evaluate(net, test_dataloader,
                                                  len(test_dataset), device)
            f1_score, nmi, recall_at_ks, _, precision = eval_metrics_one_dataset(batch_input,
                                                                                 batch_target,
                                                                                 k_vals=args.k_vals,
                                                                                 n_classes=args.num_classes)

            result_str = ', '.join('@{0}: {1:.4f}'.format(k, rec) for k, rec in zip(args.k_vals, recall_at_ks))
            result_str = 'Epoch (Test) {0}: Precision [{1:.4f}] | NMI [{2:.4f}] | F1 [{3:.4f}] | Recall [{4}]'.format(
                i + 1, precision, nmi, f1_score, result_str)
            logger.info(result_str)

    logger.info("Finished!")


if __name__ == '__main__':
    # CLI for the TVM attribute-reversal trainer. Argument order and all
    # defaults/help strings are part of the user-facing interface.
    arg_parser = argparse.ArgumentParser(description='TVM Reversion')

    # Training schedule / optimizer hyperparameters.
    arg_parser.add_argument("--epochs", default=200, type=int, help="number of epochs")
    arg_parser.add_argument("--lr", "--learning_rate", default=1e-4, type=float)
    arg_parser.add_argument('--decay', default=4e-4, type=float, help='Weight decay for optimizer.')

    # Network architecture.
    arg_parser.add_argument("--hsize", default=200, type=int)
    arg_parser.add_argument("--num_layers", default=2, type=int)

    # Evaluation / checkpointing / hardware.
    arg_parser.add_argument("--eval_per_epoch", default=10, type=int)
    arg_parser.add_argument("--ckpt", default="None", type=str)
    arg_parser.add_argument("--optim", default="adam", type=str)
    arg_parser.add_argument("--gpu", default="0", type=str)
    arg_parser.add_argument("--eval", default=0, type=int)

    # Dataset selection.
    arg_parser.add_argument("--data", default="tvm_data/save_dir/trace_data_all.pkl", type=str)
    arg_parser.add_argument("--w2v_model", default="w2v-new.model", type=str)
    arg_parser.add_argument("--type", default="Cropping2D", type=str)
    arg_parser.add_argument("--attr_name", default="cropping", type=str)
    arg_parser.add_argument("--attr_idx", default=0, type=int)

    # Model / batching options.
    arg_parser.add_argument("--attention", default=0, type=int)
    arg_parser.add_argument("--num_classes", default=9, type=int)
    arg_parser.add_argument("-bs", "--batch_size", default=32, type=int)
    arg_parser.add_argument("--config_path", default="tvm_data/other_funcs/elf/", type=str)

    # Loss configuration (Proxy Anchor Loss by default).
    arg_parser.add_argument("-lm", "--loss_method", default="pal")
    arg_parser.add_argument("-ed", "--embed_dim", type=int, default=200)
    arg_parser.add_argument('--pal_margin', default=0.1, type=float,
                            help='Proxy Anchor Loss: Margin for Proxies in ProxyAnchorLoss.')
    arg_parser.add_argument('--pal_alpha', default=32, type=float,
                            help='Proxy Anchor Loss: Scaling Parameter for Proxies in ProxyAnchorLoss.')
    arg_parser.add_argument('--pal_lr', default=1e-4, type=float,
                            help='Proxy Anchor Loss: Learning Rate for Proxies in ProxyAnchorLoss.')
    arg_parser.add_argument('--k_vals', nargs='+', default=[1, 2, 4, 8], type=int, help='Recall @ Values.')

    cli_args = arg_parser.parse_args()
    # Restrict CUDA device visibility before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = cli_args.gpu
    run(cli_args)
