import pandas as pd
import torch
from more_itertools import chunked
from itertools import chain
import torch
from typing import List, Tuple
from datautils import Vocabulary


# --- Hyperparameters / run configuration ---
MAX_LEN = 150  # maximum sequence length passed to Vocabulary.numericalize
EPOCHS = 10  # training epoch count (not referenced in this file's visible code)
BATCH_SIZE = 64  # NOTE: re-assigned to 16 further down, before inference

EMBEDDING_DIM = 100  # embedding dimensionality handed to Vocabulary.from_text
ENCODER_HID_DIM = 256  # encoder hidden size (not referenced here; kept for config parity)
DECODER_HID_DIM = 256  # decoder hidden size (not referenced here; kept for config parity)
DROPOUT = 0.1  # dropout probability (not referenced here; kept for config parity)
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # prefer GPU when available


def load_texts_from_csv(file_path: str) -> Tuple[List[str], List[str]]:
    """Read paired ingredient/recipe texts from a CSV file.

    Rows where either the "Ingredients" or "Recipe" column is missing
    are discarded before extraction.

    Args:
        file_path: Path to a CSV file with "Ingredients" and "Recipe" columns.

    Returns:
        A (ingredients, recipes) pair of equal-length string lists.
    """
    df = pd.read_csv(file_path).dropna(subset=["Ingredients", "Recipe"])
    return df["Ingredients"].tolist(), df["Recipe"].tolist()


# Build vocabularies from the training split.  `vocab` covers both source
# (ingredients) and target (recipes) text and is used by the model trained
# with a shared embedding table; the separate vocabularies serve the rest.
print("Loading Vocab...")
train_src, train_tgt = load_texts_from_csv("./data/train.csv")
vocab = Vocabulary.from_text(train_src + train_tgt, embedding_dim=EMBEDDING_DIM)
src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)
tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)
print("Loaded")

# Inference uses a smaller batch size than training (64 -> 16).
BATCH_SIZE = 16


def run(src: List[str], model_path: str, shared_embedding: bool = False):
    """Generate recipe texts for a list of ingredient strings.

    Args:
        src: Raw ingredient strings to translate into recipes.
        model_path: Path to a model serialized with torch.save.
        shared_embedding: If True, numericalize/denumericalize with the
            joint source+target vocabulary (for models trained with a
            shared embedding table); otherwise use the separate vocabs.

    Returns:
        A list of generated recipe strings, one per input string.
    """
    if shared_embedding:
        svb = tvb = vocab
    else:
        svb, tvb = src_vocab, tgt_vocab

    # Use the module-level MAX_LEN instead of the previous duplicated literal.
    src_tokens = [svb.numericalize(s, max_len=MAX_LEN) for s in src]

    # map_location lets GPU-trained checkpoints load on CPU-only hosts.
    model = torch.load(model_path, map_location=DEVICE).to(DEVICE)
    model.eval()  # disable dropout during generation (model uses DROPOUT=0.1)

    hyps = []
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for batch in chunked(src_tokens, BATCH_SIZE):
            batch = [torch.tensor(item, dtype=torch.long) for item in batch]
            # NOTE(review): pad_sequence pads with 0 — assumes the vocab's
            # pad index is 0; confirm against Vocabulary.
            batch = torch.nn.utils.rnn.pad_sequence(batch, batch_first=True)
            ptokens = model.generate(batch.to(DEVICE))
            for tk in ptokens:
                hyps.append(tvb.denumericalize(tk.cpu().numpy().tolist()))

    return hyps


# Output column name -> checkpoint path for each model variant to evaluate.
# The "shared_embedding" checkpoint is the one run with shared_embedding=True
# in main() below.
models = {
    "Generated Recipe - Baseline 1": "./checkpoints/seq2seq/best.pt",
    "Generated Recipe - Baseline 2": "./checkpoints/seq2seq_attention/best.pt",
    "Generated Recipe - Extended 1": "./checkpoints/seq2seq_attention_with_shared_embedding/best.pt",
    "Generated Recipe - Extended 2": "./checkpoints/seq2seq_attention_with_stack_layers/best.pt",
    "Generated Recipe - Extended GPN": "./checkpoints/seq2seq_attention_with_pointer_generator/best.pt",
}

# Input CSV must contain an "Ingredients" column; the output CSV is the input
# plus one generated-recipe column per model above.
input_file = "./Cooking_Dataset/generated_012345678.csv"
output_file = "./Cooking_Dataset/submission.csv"


def main():
    """Run every checkpointed model over the input CSV and write one
    submission file containing a generated-recipe column per model."""
    dataframe = pd.read_csv(input_file)
    ingredients = dataframe["Ingredients"].tolist()
    for column, model_path in models.items():
        print(f"Running {column}...")
        # Derive the shared-embedding flag from the checkpoint path rather
        # than from the dict's insertion order (previously `index == 2`),
        # which silently breaks if entries are reordered.
        shared_embedding = "shared_embedding" in model_path
        dataframe[column] = run(ingredients, model_path, shared_embedding=shared_embedding)

    dataframe.to_csv(output_file, index=False)


# Script entry point: only run inference when executed directly, not on import.
if __name__ == "__main__":
    main()
