import argparse
from pathlib import Path
from vocab import Vocab
import torch
from torch.utils.data import DataLoader
from dataset import Pix2CodeDataset
from utils import collate_fn, save_model, resnet_img_transformation, ids_to_tokens, generate_visualization_object, resnet_img_transformation
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction
import math
from model.model2 import *
import matplotlib.pyplot as plt
from tqdm import tqdm

'''
Param
'''
source = 'web'  # dataset flavor; selects data/<source>/... and result/<source>/...

parser = argparse.ArgumentParser(description='Train the model')
parser.add_argument("--data_path", type=str,
                    default=Path("data", source, "all_data"), help="Path to the dataset")
parser.add_argument("--vocab_file_path", type=str,
                    default=None, help="Path to the vocab file")
# BUG FIX: `--cuda` was a no-op (action='store_true' with default=True can
# never be switched off). `--cuda` keeps its exact old behavior; `--no-cuda`
# is a new, backward-compatible switch that actually disables it.
parser.add_argument("--cuda", dest="cuda", action='store_true',
                    default=True, help="Use cuda or not")
parser.add_argument("--no-cuda", dest="cuda", action='store_false',
                    help="Do not use cuda")
parser.add_argument("--img_crop_size", type=int, default=224)
parser.add_argument("--split", type=str, default="train")
parser.add_argument("--save_after_epochs", type=int, default=5,
                    help="Save model checkpoint every n epochs")
parser.add_argument("--models_dir", type=str, default=Path("result/" + source + "/model2"),
                    help="The dir where the trained models are saved")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--epochs", type=int, default=20)
parser.add_argument("--lr", type=float, default=1e-4,
                    help="Learning Rate")
parser.add_argument("--print_freq", type=int, default=1,
                    help="Print training stats every n epochs")
parser.add_argument("--seed", type=int, default=2020,
                    help="The random seed for reproducing")

def mask_loss(predict, target, valid_len, criterion):
    """Evaluate `criterion` over only the valid (non-padded) time steps.

    For each sequence ``b`` in the batch, the first ``valid_len[b]`` positions
    of ``predict`` (logits) and ``target`` (label ids) are kept; everything
    beyond that is padding and is dropped before the loss is computed.
    """
    batch_size = predict.size(0)
    kept_logits = [predict[b, :valid_len[b], :] for b in range(batch_size)]
    kept_labels = [target[b, :valid_len[b]] for b in range(batch_size)]
    return criterion(torch.cat(kept_logits), torch.cat(kept_labels))

if __name__ == '__main__':
    args = parser.parse_args()
    # Default the vocab file to a "vocab.txt" sitting next to the dataset dir.
    args.vocab_file_path = args.vocab_file_path if args.vocab_file_path else Path(
        Path(args.data_path).parent, "vocab.txt")

    # Seed both the CPU and CUDA RNGs for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Load the vocab file
    vocab = Vocab(args.vocab_file_path)
    assert len(vocab) > 0

    # Setup GPU
    use_cuda = args.cuda and torch.cuda.is_available()
    assert use_cuda  # Trust me, you don't want to train this model on a cpu.
    # BUG FIX: the device was hard-coded to "cpu", contradicting the assert
    # above that demands CUDA; select the device that was actually requested.
    device = torch.device("cuda:0" if use_cuda else "cpu")

    transform_imgs = resnet_img_transformation(args.img_crop_size)

    # Training data loader.
    data_set = Pix2CodeDataset(args.data_path, args.split,
                               vocab, transform=transform_imgs)
    train_loader = DataLoader(
        data_set,
        batch_size=args.batch_size,
        collate_fn=lambda data: collate_fn(data, vocab=vocab),
        pin_memory=use_cuda,
        # num_workers=4,
        drop_last=True)

    # Validation data loader; batch_size=1 because samples are decoded one at a time.
    valid_data_set = Pix2CodeDataset(args.data_path, "validation",
                                     vocab, transform=transform_imgs)
    valid_loader = DataLoader(
        valid_data_set,
        batch_size=1,
        collate_fn=lambda data: collate_fn(data, vocab=vocab),
        pin_memory=use_cuda,
        # num_workers=4,
        drop_last=True)

    print('--------------------------')
    print('len_date_set', len(data_set))
    print('len_train_loader', len(train_loader))
    print('--------------------------')

    print("Created data loader")

    # Build the encoder/decoder pipeline (256-wide configuration of model2).
    pipeline = Pipeline(vocab_size=len(vocab), embedding_size=256,
                        queries_size=256, keys_size=256, num_hiddens=256, dropout=0.1,
                        lstm_intput_size=256, lstm_num_hiddens=256, lstm_dropout=0.1, bias=True, num_layers=1,
                        decoder_input_size=256 * 2, decoder_num_hiddens=256, decoder_dropout=0.1, decoder_num_layers=1,
                        device=device
                        )
    pipeline.to(device)
    pipeline.train()

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(pipeline.parameters(), lr=args.lr)

    # Training the model
    print('--------------------------')
    print('begin train')
    print('--------------------------')
    iteration = 0  # renamed from `iter` to avoid shadowing the builtin
    iter_loss = []  # sampled losses, plotted after training
    for epoch in range(args.epochs):
        pipeline.train()  # validation below switches to eval(); switch back each epoch
        report_loss = 0.0
        for i, (images, captions, lengths) in enumerate(train_loader):
            images = images.to(device)
            captions = captions.to(device)
            lengths = torch.tensor(lengths).to(device)

            optimizer.zero_grad()

            output = pipeline(images, captions, lengths)
            loss = mask_loss(output, captions, lengths, criterion)
            # BUG FIX: keep a plain float, not the loss tensor — the tensor
            # would pin the whole autograd graph in memory until the
            # checkpoint is written at the end of the epoch.
            report_loss = loss.item()
            loss.backward()
            optimizer.step()

            # `iteration % 5 == 0` already covers iteration == 0.
            if iteration % 5 == 0 or iteration == len(train_loader) - 1:
                iter_loss.append(loss.item())
                print(
                    f'Epoch : {epoch} || iter: {iteration} || Loss : {loss:.4f} || Perplexity : {math.exp(loss):.4f}')
            iteration += 1

        # Checkpoint on the first, every n-th, and the final epoch.
        if epoch == 0 or epoch % args.save_after_epochs == 0 or epoch == args.epochs - 1:
            save_model(args.models_dir, pipeline,
                       optimizer, epoch, report_loss, args.batch_size, vocab)
            print("Saved model checkpoint")

        '''
        Validation
        '''
        pipeline.eval()
        predictions = []
        targets = []
        end = vocab.get_id_by_token(vocab.get_end_token())
        # Iterate the raw dataset (not the loader) so each sample keeps its
        # natural length; the model samples token ids until `end` is reached.
        # no_grad: inference only — don't build autograd graphs while sampling.
        with torch.no_grad():
            for i, (image, caption) in enumerate(tqdm(valid_loader.dataset)):
                image = image.to(device).unsqueeze(0)
                caption = caption.to(device).unsqueeze(0)
                sample_ids = pipeline.sample(image, end)
                sample_ids = sample_ids.cpu().data.numpy()
                predictions.append(sample_ids)
                targets.append(caption.cpu().numpy())
        predictions = [ids_to_tokens(vocab, prediction) for prediction in predictions]
        targets = [ids_to_tokens(vocab, target.reshape(-1)) for target in targets]

        # Corpus BLEU with one reference per hypothesis; method4 smoothing
        # keeps short sequences from zeroing out the score.
        bleu = corpus_bleu([[target] for target in targets], predictions,
                           smoothing_function=SmoothingFunction().method4)
        print("BLEU score: {}".format(bleu))

    # Plot the sampled training losses after the final epoch.
    plt.figure('loss')
    plt.plot(iter_loss, color='red')
    plt.xlabel('iterator')
    plt.ylabel('loss')
    plt.show()
