from random import shuffle

from torch.nn import attention
import pandas as pd
import argparse
import torch
from torch.utils.data import DataLoader
from typing import List, Tuple
from datautils import Vocabulary

from datautils import (
    Seq2SeqDataset,
    data_collate_fn,
)
import models
from train import train


# Hyperparameters shared by all experiment runners below.
MAX_LEN = 150  # max sequence length; unused in this file — presumably consumed by datautils, TODO confirm
EPOCHS = 10  # passed to train() as max_epoch
BATCH_SIZE = 64

EMBEDDING_DIM = 100  # matches the 100-d GloVe file referenced in run_seq2seq_attention_with_glove
ENCODER_HID_DIM = 256
DECODER_HID_DIM = 256
DROPOUT = 0.1
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # prefer GPU when available


def load_texts_from_csv(
    file_path: str,
    src_col: str = "Ingredients",
    tgt_col: str = "Recipe",
) -> Tuple[List[str], List[str]]:
    """Load parallel (source, target) text lists from a CSV file.

    Rows missing either column are dropped so the two lists stay aligned.

    Args:
        file_path: Path to the CSV file.
        src_col: Name of the source-text column (default "Ingredients").
        tgt_col: Name of the target-text column (default "Recipe").

    Returns:
        A tuple ``(sources, targets)`` of equal-length string lists.
    """
    dataframe = pd.read_csv(file_path)
    # Drop rows where either side is NaN to keep the pairs aligned.
    dataframe = dataframe.dropna(subset=[src_col, tgt_col])
    return dataframe[src_col].tolist(), dataframe[tgt_col].tolist()


# Eagerly load all three splits at import time so every run_* experiment below
# can share them.  NOTE(review): this runs on *any* import of this module and
# requires ./data/{train,dev,test}.csv to exist — consider deferring to main.
train_src, train_tgt = load_texts_from_csv("./data/train.csv")
dev_src, dev_tgt = load_texts_from_csv("./data/dev.csv")
test_src, test_tgt = load_texts_from_csv("./data/test.csv")


def run_seq2seq():
    """Train and evaluate the plain LSTM seq2seq baseline (no attention)."""
    # Separate vocabularies (and embedding layers) for source and target sides.
    src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)
    tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)

    encoder = models.EncoderLSTM(
        embedding=src_vocab.create_embedding_layer(),
        hid_dim=ENCODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
    )
    decoder = models.DecoderLSTM(
        embedding=tgt_vocab.create_embedding_layer(),
        hid_dim=DECODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
    )
    model = models.Seq2Seq(encoder, decoder)
    # NOTE(review): the pointer-generator runner ignores pad index 0 in its
    # loss; if 0 is the pad id here as well, ignore_index=0 would keep padding
    # out of this loss too — confirm against datautils.
    criteria = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    dataset = Seq2SeqDataset(
        train_src, train_tgt, src_vocab=src_vocab, tgt_vocab=tgt_vocab
    )
    # Bug fix: training batches must be reshuffled every epoch (was
    # shuffle=False, i.e. a fixed ordering).  Dev/test stay deterministic.
    dataloader = DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=data_collate_fn
    )
    dev_dataset = Seq2SeqDataset(
        dev_src,
        dev_tgt,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
    )
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    test_dataset = Seq2SeqDataset(
        test_src,
        test_tgt,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
    )
    test_dataloader = DataLoader(
        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    train(
        model,
        dataloader,
        dev_dataloader,
        test_dataloader,
        optimizer,
        criteria,
        1,  # positional arg to train(); presumably grad-clip or log interval — TODO confirm
        tgt_vocab,
        saved_folder="checkpoints/seq2seq",
        device=DEVICE,
        max_epoch=EPOCHS,
    )


def run_seq2seq_attention():
    """Train and evaluate the LSTM seq2seq model with additive attention."""
    src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)
    tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)

    encoder = models.EncoderLSTM(
        embedding=src_vocab.create_embedding_layer(),
        hid_dim=ENCODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
    )
    # Renamed from `attention` to avoid shadowing the module-level
    # `torch.nn.attention` import.
    attn = models.Attention(
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
    )
    decoder = models.DecoderAttentionLSTM(
        embedding=tgt_vocab.create_embedding_layer(),
        output_dim=tgt_vocab.vocab_size,
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
        attention=attn,
    )
    model = models.Seq2Seq(encoder, decoder)
    # NOTE(review): consider ignore_index=0 if 0 is the pad id (the
    # pointer-generator runner does this) — confirm against datautils.
    criteria = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    dataset = Seq2SeqDataset(
        train_src, train_tgt, src_vocab=src_vocab, tgt_vocab=tgt_vocab
    )
    # Bug fix: training batches must be reshuffled every epoch (was
    # shuffle=False).  Dev/test stay deterministic.
    dataloader = DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=data_collate_fn
    )
    dev_dataset = Seq2SeqDataset(
        dev_src,
        dev_tgt,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
    )
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    test_dataset = Seq2SeqDataset(
        test_src,
        test_tgt,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
    )
    test_dataloader = DataLoader(
        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    train(
        model,
        dataloader,
        dev_dataloader,
        test_dataloader,
        optimizer,
        criteria,
        1,  # positional arg to train(); presumably grad-clip or log interval — TODO confirm
        tgt_vocab,
        saved_folder="checkpoints/seq2seq_attention",
        device=DEVICE,
        max_epoch=EPOCHS,
    )


def run_seq2seq_attention_with_preprocessing():
    """Placeholder: seq2seq + attention with extra text preprocessing.

    Raises:
        NotImplementedError: always — previously this was a silent no-op,
        which made the CLI appear to succeed without training anything.
    """
    raise NotImplementedError(
        "seq2seq_attention_with_preprocessing is not implemented yet"
    )


def run_seq2seq_attention_with_glove():
    """Placeholder: seq2seq + attention with GloVe-initialized embeddings.

    The intended pretrained-vector file is ``glove.6B.100d.txt`` (100-d,
    matching EMBEDDING_DIM).

    Raises:
        NotImplementedError: always — previously this was a silent no-op
        (it only bound an unused local path), which made the CLI appear to
        succeed without training anything.
    """
    raise NotImplementedError(
        "seq2seq_attention_with_glove is not implemented yet "
        "(expected embeddings file: glove.6B.100d.txt)"
    )


def run_seq2seq_attention_with_shared_embedding():
    """Seq2seq + attention where encoder and decoder share one vocabulary
    and one embedding layer (built over source + target text combined)."""
    vocab = Vocabulary.from_text(train_src + train_tgt, embedding_dim=EMBEDDING_DIM)
    # One embedding object, passed to both encoder and decoder so the
    # parameters are tied.
    embedding_layer = vocab.create_embedding_layer()

    encoder = models.EncoderLSTM(
        embedding=embedding_layer,
        hid_dim=ENCODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
    )
    attn = models.Attention(
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
    )
    decoder = models.DecoderAttentionLSTM(
        embedding=embedding_layer,
        output_dim=vocab.vocab_size,
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
        attention=attn,
    )
    model = models.Seq2Seq(encoder, decoder)
    # NOTE(review): consider ignore_index=0 if 0 is the pad id (the
    # pointer-generator runner does this) — confirm against datautils.
    criteria = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    dataset = Seq2SeqDataset(train_src, train_tgt, src_vocab=vocab, tgt_vocab=vocab)
    # Bug fix: training batches must be reshuffled every epoch (was
    # shuffle=False).  Dev/test stay deterministic.
    dataloader = DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=data_collate_fn
    )
    dev_dataset = Seq2SeqDataset(
        dev_src,
        dev_tgt,
        src_vocab=vocab,
        tgt_vocab=vocab,
    )
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    test_dataset = Seq2SeqDataset(
        test_src,
        test_tgt,
        src_vocab=vocab,
        tgt_vocab=vocab,
    )
    test_dataloader = DataLoader(
        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    train(
        model,
        dataloader,
        dev_dataloader,
        test_dataloader,
        optimizer,
        criteria,
        1,  # positional arg to train(); presumably grad-clip or log interval — TODO confirm
        vocab,
        saved_folder="checkpoints/seq2seq_attention_with_shared_embedding",
        device=DEVICE,
        max_epoch=EPOCHS,
    )


def run_seq2seq_attention_with_stack_layers():
    """Seq2seq + attention with 3-layer stacked LSTMs on both sides."""
    src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)
    tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)

    encoder = models.EncoderLSTM(
        embedding=src_vocab.create_embedding_layer(),
        hid_dim=ENCODER_HID_DIM,
        n_layers=3,  # stacked variant: 3 layers vs. 1 in the other runners
        dropout=DROPOUT,
    )
    attn = models.Attention(
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
    )
    decoder = models.DecoderAttentionLSTM(
        embedding=tgt_vocab.create_embedding_layer(),
        output_dim=tgt_vocab.vocab_size,
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
        n_layers=3,
        dropout=DROPOUT,
        attention=attn,
    )
    model = models.Seq2Seq(encoder, decoder)
    # NOTE(review): consider ignore_index=0 if 0 is the pad id (the
    # pointer-generator runner does this) — confirm against datautils.
    criteria = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    dataset = Seq2SeqDataset(
        train_src, train_tgt, src_vocab=src_vocab, tgt_vocab=tgt_vocab
    )
    # Bug fix: training batches must be reshuffled every epoch (was
    # shuffle=False).  Dev/test stay deterministic.
    dataloader = DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=data_collate_fn
    )
    dev_dataset = Seq2SeqDataset(
        dev_src,
        dev_tgt,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
    )
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    test_dataset = Seq2SeqDataset(
        test_src,
        test_tgt,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
    )
    test_dataloader = DataLoader(
        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    train(
        model,
        dataloader,
        dev_dataloader,
        test_dataloader,
        optimizer,
        criteria,
        1,  # positional arg to train(); presumably grad-clip or log interval — TODO confirm
        tgt_vocab,
        saved_folder="checkpoints/seq2seq_attention_with_stack_layers",
        device=DEVICE,
        max_epoch=EPOCHS,
    )


def run_seq2seq_attention_with_beamsearch():
    """Placeholder: seq2seq + attention decoded with beam search.

    Raises:
        NotImplementedError: always — previously this was a silent no-op,
        which made the CLI appear to succeed without training anything.
    """
    raise NotImplementedError(
        "seq2seq_attention_with_beamsearch is not implemented yet"
    )


def run_seq2seq_attention_with_pretrained_model():
    """Placeholder: fine-tune a pretrained model for this task.

    Raises:
        NotImplementedError: always — previously this was a silent no-op,
        which made the CLI appear to succeed without training anything.
    """
    raise NotImplementedError(
        "seq2seq_attention_with_pretrained_model is not implemented yet"
    )


def run_seq2seq_attention_with_pointer_generator():
    """Pointer-generator seq2seq: a joint vocabulary over source + target,
    with ONE embedding layer shared by encoder and decoder."""
    # PGN must share the embedding layer between encoder and decoder.
    vocab = Vocabulary.from_text(train_src + train_tgt, embedding_dim=EMBEDDING_DIM)
    # Bug fix: the original called vocab.create_embedding_layer() twice,
    # producing two *independent* layers despite the stated sharing
    # requirement.  Create it once and pass the same object to both.
    shared_embedding = vocab.create_embedding_layer()

    encoder = models.EncoderLSTM(
        embedding=shared_embedding,
        hid_dim=ENCODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
    )
    attn = models.Attention(
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
    )

    decoder = models.PointerGeneratorDecoder(
        embedding=shared_embedding,
        output_dim=vocab.vocab_size,
        enc_hid_dim=ENCODER_HID_DIM,
        dec_hid_dim=DECODER_HID_DIM,
        n_layers=1,
        dropout=DROPOUT,
        attention=attn,
    )
    model = models.PointerGeneratorSeq2seq(encoder, decoder)
    # NLLLoss because the PGN decoder outputs log-probabilities directly;
    # pad index 0 is excluded from the loss.
    criteria = torch.nn.NLLLoss(ignore_index=0)  # ignore padding
    optimizer = torch.optim.Adam(model.parameters())

    dataset = Seq2SeqDataset(train_src, train_tgt, src_vocab=vocab, tgt_vocab=vocab)
    # Bug fix: training batches must be reshuffled every epoch (was
    # shuffle=False).  Dev/test stay deterministic.
    dataloader = DataLoader(
        dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=data_collate_fn
    )
    dev_dataset = Seq2SeqDataset(
        dev_src,
        dev_tgt,
        src_vocab=vocab,
        tgt_vocab=vocab,
    )
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    test_dataset = Seq2SeqDataset(
        test_src,
        test_tgt,
        src_vocab=vocab,
        tgt_vocab=vocab,
    )
    test_dataloader = DataLoader(
        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn
    )
    train(
        model,
        dataloader,
        dev_dataloader,
        test_dataloader,
        optimizer,
        criteria,
        1,  # positional arg to train(); presumably grad-clip or log interval — TODO confirm
        vocab,
        saved_folder="checkpoints/seq2seq_attention_with_pointer_generator",
        device=DEVICE,
        max_epoch=EPOCHS,
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Seq2Seq with Attention")
    parser.add_argument(
        "model",
        type=str,
        # Fix: argparse ignores `default` on a required positional; nargs="?"
        # makes the argument optional so the declared default actually applies.
        nargs="?",
        default="seq2seq_attention_with_glove",
        help="Model to run",
        choices=[
            "seq2seq",
            "seq2seq_attention",
            "seq2seq_attention_with_preprocessing",
            "seq2seq_attention_with_glove",
            "seq2seq_attention_with_shared_embedding",
            "seq2seq_attention_with_stack_layers",
            "seq2seq_attention_with_beamsearch",
            "seq2seq_attention_with_pretrained_model",
            "seq2seq_attention_with_pointer_generator",
        ],
    )
    args = parser.parse_args()
    # `choices` guarantees a matching run_<model> function exists at module
    # level, so a direct lookup is safe (and fails loudly if the lists ever
    # drift apart, instead of the opaque "NoneType is not callable" from
    # globals().get(...)()).
    globals()["run_" + args.model]()
