import argparse
from pathlib import Path
from vocab import Vocab
import torch
from torch.utils.data import DataLoader
from dataset import Pix2CodeDataset
from utils import collate_fn, save_model, resnet_img_transformation
from model.step_lstm_with_attention_v2 import Encoder, Decoder
import math


# Command-line interface for the training script.
parser = argparse.ArgumentParser(description='Train the model')

parser.add_argument("--data_path", type=str,
                    default=Path("data", "web", "all_data"), help="Path to the dataset")
parser.add_argument("--vocab_file_path", type=str,
                    default=None, help="Path to the vocab file")
# BUG FIX: the original declared `--cuda` with action='store_true' AND
# default=True, so the option could never be switched off from the command
# line (passing --cuda only re-set it to True).  `--cuda` is kept for
# backward compatibility and `--no-cuda` now disables it.
parser.add_argument("--cuda", action='store_true',
                    default=True, help="Use cuda (default)")
parser.add_argument("--no-cuda", dest="cuda", action='store_false',
                    help="Disable cuda")
parser.add_argument("--img_crop_size", type=int, default=224)
parser.add_argument("--split", type=str, default="train")
parser.add_argument("--save_after_epochs", type=int, default=5,
                    help="Save model checkpoint every n epochs")
parser.add_argument("--models_dir", type=str, default=Path("params/attention_v2/0711-weight_decay=0.00005/"),
                    help="The dir where the trained models are saved")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--epochs", type=int, default=30)
parser.add_argument("--lr", type=float, default=1e-4,
                    help="Learning Rate")
parser.add_argument("--print_freq", type=int, default=1,
                    help="Print training stats every n epochs")
parser.add_argument("--seed", type=int, default=2020,
                    help="The random seed for reproducing")

if __name__ == '__main__':
    args = parser.parse_args()
    # Default the vocab file to <data_path's parent>/vocab.txt when not given.
    args.vocab_file_path = args.vocab_file_path if args.vocab_file_path else Path(
        Path(args.data_path).parent, "vocab.txt")

    print("Training args:", args)

    # Seed both the CPU and CUDA RNGs for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Load the vocab file
    vocab = Vocab(args.vocab_file_path)
    assert len(vocab) > 0

    # Setup GPU.
    # Idiom fix: `True if X else False` is redundant — bool() of the condition.
    use_cuda = bool(args.cuda and torch.cuda.is_available())
    # Robustness fix: `assert` is stripped under `python -O`, so the original
    # CPU guard could be silently optimised away; raise explicitly instead.
    # (Training this model on a CPU is impractically slow.)
    if not use_cuda:
        raise RuntimeError(
            "CUDA is required for training (pass --cuda on a machine with a GPU).")
    device = torch.device("cuda:0" if use_cuda else "cpu")

    # Image preprocessing matching the ResNet-style encoder input.
    transform_imgs = resnet_img_transformation(args.img_crop_size)

    # Creating the data loader.
    # NOTE(review): the loader deliberately yields ONE sample at a time
    # (batch_size=1); the training loop below accumulates `args.batch_size`
    # samples by hand before each optimizer step — presumably because the
    # variable-length captions are easier to concatenate manually.  Confirm
    # before changing either side.
    train_loader = DataLoader(
        Pix2CodeDataset(args.data_path, args.split,
                        vocab, transform=transform_imgs),
        batch_size=1,
        collate_fn=lambda data: collate_fn(data, vocab=vocab),
        pin_memory=True if use_cuda else False,
        # num_workers=4,
        drop_last=True)
    print("Created data loader")

    # Creating the models
    embed_size = 256   # dimensionality of the image feature / word embeddings
    hidden_size = 512  # decoder hidden state size
    num_layers = 1     # number of recurrent layers in the decoder
    lr = args.lr

    encoder = Encoder(embed_size)
    decoder = Decoder(embed_size, hidden_size, len(vocab), num_layers)

    encoder = encoder.to(device)
    decoder = decoder.to(device)

    # Define optimizer and loss function.
    criterion = torch.nn.CrossEntropyLoss()
    # Only the decoder plus the encoder's head (linear + batch-norm layers)
    # are optimised; the rest of the encoder (presumably a pretrained CNN
    # backbone — verify in model/step_lstm_with_attention_v2) stays frozen.
    params = list(decoder.parameters()) + list(encoder.linear.parameters()
                                               ) + list(encoder.BatchNorm.parameters())
    optimizer = torch.optim.Adam(params, lr=lr, weight_decay=0.00005)
    # Training the model
    # Robustness fix: `loss` was previously unbound until the first optimizer
    # step, so save_model could raise NameError in a degenerate run.
    loss = None
    for epoch in range(args.epochs):
        batch = 0
        # Manual gradient-accumulation buffers: samples arrive one at a time
        # from the loader and are concatenated until a full batch is ready.
        train_images = None
        train_captions = []
        train_target = None
        for i, (images, captions, lengths) in enumerate(train_loader):
            images = images.to(device)
            captions = captions.to(device)  # ground-truth token sequences
            # Flatten the padded captions into the packed layout expected by
            # CrossEntropyLoss (concatenated valid time steps).
            targets = torch.nn.utils.rnn.pack_padded_sequence(
                input=captions, lengths=lengths, batch_first=True)[0]
            batch += 1

            if train_images is None:
                train_images = images
            else:
                train_images = torch.cat((train_images, images), 0)
            train_captions.append(captions)
            if train_target is None:
                train_target = targets
            else:
                train_target = torch.cat((train_target, targets), 0)

            # Step once a full batch has been accumulated, or on the LAST
            # loader item.  BUG FIX: the original tested `len(train_loader) == i`,
            # which is never true (enumerate yields i up to len-1), so any
            # trailing partial batch was silently dropped every epoch.
            # The size() != 1 guard skips single-sample steps (batch-norm in
            # training mode needs more than one sample).
            if (batch == args.batch_size or i == len(train_loader) - 1) and train_images.size(0) != 1:
                encoder.zero_grad()
                decoder.zero_grad()

                features = encoder(train_images)
                # NOTE(review): `train_captions` is a Python list of caption
                # tensors while `lengths` comes from the *last* sample only —
                # this relies on the Decoder's contract; confirm against
                # model/step_lstm_with_attention_v2 before refactoring.
                output = decoder(features, train_captions, lengths)
                loss = criterion(output, train_target)

                loss.backward()
                optimizer.step()
                loss = loss.item()
                # Print once every `print_freq` epochs, on the first flushed batch.
                if epoch % args.print_freq == 0 and i == args.batch_size - 1:
                    print(
                        f'Epoch : {epoch} || Loss : {loss:.4f} || Perplexity : {math.exp(loss):.4f}')
                # Reset the accumulation buffers for the next batch.
                train_images = None
                train_captions = []
                train_target = None
                batch = 0
        # Periodic checkpoint (skipping epoch 0).
        if epoch != 0 and epoch % args.save_after_epochs == 0:
            save_model(args.models_dir, encoder, decoder,
                       optimizer, epoch, loss, args.batch_size, vocab)
            print("Saved model checkpoint")

    print("Done Training!")
    save_model(args.models_dir, encoder, decoder,
               optimizer, epoch, loss, args.batch_size, vocab)
    print("Saved final model")
