import argparse
from model.net import CLSNet
from data.tvm_dataset import TVMDataSet
import torch
import datetime
import os
from torch.utils.data import DataLoader
import tqdm
from loguru import logger
import sys
from my_utils.log_util import LogUtil

import torch.distributed as dist

# Closed set of Keras layer types the classifier predicts. The position of a
# name in this list is its class index (CLSNet is built with
# out_classes=len(label_list)), so the ordering must stay stable across
# training runs for saved checkpoints to remain meaningful.
label_list = [
    'LSTM', 'UpSampling2D', 'UpSampling3D', 'Cropping2D', 'ZeroPadding2D', 'ZeroPadding3D', 'SeparableConv2D', 'Conv2D',
    'DepthwiseConv2D', 'ReLU', 'ThresholdedReLU', 'LeakyReLU', 'Softmax', 'ELU', 'Conv3D', 'Dense', 'Reshape',
    'Flatten', 'Concatenate', 'Average', 'Maximum', 'Minimum', 'Add', 'Subtract', 'Multiply', 'Dot', 'MaxPooling2D',
    'MaxPooling3D', 'AveragePooling2D', 'AveragePooling3D', 'GlobalMaxPooling2D', 'GlobalMaxPooling3D',
    'GlobalAveragePooling2D', 'GlobalAveragePooling3D', 'BatchNormalization'
]

# Training shell
# python -m torch.distributed.launch --nnodes=1 --nproc_per_node=4 --node_rank=0 --master_port=6005 train.py


def run(args):
    """Distributed training + periodic evaluation of the CLSNet classifier.

    Expects ``dist.init_process_group`` to already have been called and
    ``args.local_rank`` to identify this process's GPU (see the ``__main__``
    block). Rank 0 owns all logging, progress bars, and checkpointing.

    Args:
        args: parsed CLI namespace (epochs, lr, hsize, num_layers,
            eval_per_epoch, ckpt, optim, data, w2v_model, local_rank).
    """
    # makedirs + exist_ok avoids a race when several ranks reach this point
    # at the same time (os.mkdir would raise on all but the first rank).
    os.makedirs("./results", exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

    # Only rank 0 creates the per-run log directory tree.
    if dist.get_rank() == 0:
        LogUtil.create_dir(args, current_time, os.path.basename(sys.argv[0]))

    # Prepare data: same pickle, 70/30 train/test split, sharded per rank.
    train_dataset = TVMDataSet(args.data, train_ratio=0.7, w2v_model=args.w2v_model)
    test_dataset = TVMDataSet(args.data, train_ratio=0.7, mode='test', w2v_model=args.w2v_model)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=1,
                                  shuffle=(train_sampler is None),
                                  sampler=train_sampler,
                                  num_workers=0,
                                  pin_memory=False)

    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=(test_sampler is None),
                                 sampler=test_sampler,
                                 num_workers=0,
                                 pin_memory=False)

    # Model init. Loading the checkpoint on rank 0 only is sufficient: DDP
    # broadcasts rank 0's parameters to every rank at construction time.
    net = CLSNet(in_d=200, out_classes=len(label_list), hiden=args.hsize, num_layers=args.num_layers)
    if args.ckpt != "None" and dist.get_rank() == 0:
        # map_location='cpu' keeps the load independent of the GPU that
        # originally saved the checkpoint.
        net.load_state_dict(torch.load(args.ckpt, map_location="cpu"), strict=False)
    net.to(device)

    net = torch.nn.parallel.DistributedDataParallel(net,
                                                    device_ids=[args.local_rank],
                                                    output_device=args.local_rank,
                                                    find_unused_parameters=True)

    # Optimizer: Adam by default, SGD when explicitly requested.
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(net.parameters(), lr=args.lr)

    criterion = torch.nn.CrossEntropyLoss()

    # Training
    for i in range(args.epochs):
        # Re-seed the sampler so each epoch shuffles shards differently.
        train_dataloader.sampler.set_epoch(i)
        pbar = None
        if dist.get_rank() == 0:
            pbar = tqdm.tqdm(total=len(train_dataset))
            pbar.set_description("Epoch:{}, Loss:{}".format(i + 1, 0))
        count = 0
        sum_loss = 0
        net.train()
        for inputs, target in train_dataloader:
            if dist.get_rank() == 0:
                pbar.update(1)
            # Some traces come back empty; skip them.
            if len(inputs) == 0:
                continue
            pred = net(inputs, device)

            optimizer.zero_grad()

            loss = criterion(pred, target[0].to(device))
            loss.backward()
            optimizer.step()

            sum_loss += loss.item()
            count += 1

            # BUG FIX: the original called pbar.set_description here on every
            # rank, but pbar is None on ranks != 0 -> AttributeError crash.
            if count % 10 == 0 and dist.get_rank() == 0:
                pbar.set_description("Epoch:{}, Loss:{}".format(i + 1, sum_loss / count))
        if dist.get_rank() == 0:
            # max(count, 1) guards against an all-skipped epoch (count == 0),
            # which previously raised ZeroDivisionError.
            logger.info("Epoch:{}, Loss:{}".format(i + 1, sum_loss / max(count, 1)))
            pbar.close()

        # Periodic checkpoint + evaluation
        if i % args.eval_per_epoch == 0:
            save_path = "results/{}-({})/checkpoints/".format(current_time, os.path.basename(sys.argv[0]))
            if dist.get_rank() == 0:
                # Ensure the directory exists even if LogUtil did not create it.
                os.makedirs(save_path, exist_ok=True)
                # Save the unwrapped module's weights: the DDP wrapper prefixes
                # every key with "module.", which the strict=False load above
                # (into a raw CLSNet) would silently drop.
                torch.save(net.module.state_dict(), "{}/model_ckpt_{}.pth".format(save_path, i))
                logger.info("Start evaluation ...")
            net.eval()
            tmp_count = 0
            succ_count = 0
            pbar = None
            if dist.get_rank() == 0:
                pbar = tqdm.tqdm(total=len(test_dataset))
            # no_grad: evaluation needs no autograd graph.
            with torch.no_grad():
                for inputs, target in test_dataloader:
                    if dist.get_rank() == 0:
                        pbar.update(1)
                    pred = net(inputs, device)
                    if pred is None:
                        continue
                    pred = pred[0].topk(1)[1][0]
                    if pred == target[0].to(device):
                        succ_count += 1
                    tmp_count += 1
            if dist.get_rank() == 0:
                # NOTE(review): this is rank 0's shard accuracy only — counts
                # from other ranks are not aggregated (no all_reduce).
                pbar.close()
                logger.info("Accuracy: {}".format(succ_count / max(tmp_count, 1)))

    if dist.get_rank() == 0:
        logger.info("Finished!")


if __name__ == '__main__':
    # CLI for the distributed trainer. Launched via torch.distributed.launch,
    # which spawns one process per GPU and supplies --local_rank to each.
    cli = argparse.ArgumentParser(description='TVM Reversion')
    cli.add_argument("--epochs", type=int, default=200, help="number of epochs")
    cli.add_argument("--lr", type=float, default=1e-4)
    cli.add_argument("--hsize", type=int, default=200)
    cli.add_argument("--num_layers", type=int, default=2)
    cli.add_argument("--eval_per_epoch", type=int, default=10)
    cli.add_argument("--ckpt", type=str, default="None")
    cli.add_argument("--optim", type=str, default="adam")
    cli.add_argument("--gpu", type=str, default="0")
    cli.add_argument("--eval", type=int, default=0)
    cli.add_argument("--data", type=str, default="tvm_data/save_dir/trace_data_all.pkl")
    cli.add_argument("--w2v_model", type=str, default="w2v-new.model")
    cli.add_argument('--local_rank', type=int, default=-1, help='node rank for distributed training')
    args = cli.parse_args()
    # Bind this process to its GPU before initialising the NCCL process group.
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl')
    run(args)
