import torch
from torchtext.data.utils import get_tokenizer
from collections import defaultdict
import pandas as pd


class Vocab(object):
    """Word-level vocabulary built from iterables of sentences.

    Reserves id 0 for '<UNK>' and id 1 for '<pad>'; every other word gets
    an id >= 2, assigned in sorted order so the mapping is deterministic
    across runs (iterating a raw ``set`` varies with hash randomization).
    """

    def __init__(self, *args):
        # 'basic_english' lowercases and splits on whitespace/punctuation.
        self.tokenizer = get_tokenizer('basic_english')
        self.vocab2id, self.id2vocab = self._get_vocab(*args)

    def _get_vocab(self, *args):
        """Build word->id and id->word maps from iterables of sentences.

        Args:
            *args: one or more iterables of raw sentence strings.

        Returns:
            (vocab2id, id2vocab) defaultdicts with '<UNK>'=0, '<pad>'=1.
        """
        assert all(hasattr(arg, '__iter__') for arg in args), \
            "Expected iterable arguments"
        vocab2id = defaultdict(int, [('<UNK>', 0), ('<pad>', 1)])
        id2vocab = defaultdict(str, [(0, '<UNK>'), (1, '<pad>')])
        words = set()
        for lines in args:
            for line in lines:
                words.update(self.tokenizer(line))
        # Sorted so word ids are reproducible from one run to the next.
        for idx, word in enumerate(sorted(words), start=2):
            vocab2id[word] = idx
            id2vocab[idx] = word
        return vocab2id, id2vocab

    def encode_dataset(self, sentence_iter):
        """Tokenize each sentence and map tokens to ids (OOV -> '<UNK>' id).

        Uses ``.get`` rather than ``[]`` so looking up an unknown token does
        NOT insert it into the defaultdict and silently grow the vocabulary.
        """
        assert hasattr(sentence_iter, '__iter__'), \
            "Expected iterable arguments "
        unk_id = self.vocab2id['<UNK>']
        return [[self.vocab2id.get(tok, unk_id) for tok in self.tokenizer(line)]
                for line in sentence_iter]


def gen_raw_dataset(df_raw, train_ratio=0.9):
    """Split the raw dataframe into train/test question pairs.

    The train split keeps only duplicate pairs (positive examples); the
    test split keeps all pairs together with their labels.

    Args:
        df_raw: dataframe with columns 'question1', 'question2', 'id',
            'is_duplicate'.
        train_ratio: fraction of rows (from the top) used for training.

    Returns:
        (q1_train, q2_train, q1_test, q2_test, y_test) pandas Series.
    """
    df_col = ['question1', 'question2', 'id', 'is_duplicate']
    assert set(df_col).issubset(set(df_raw.columns)), \
        f"Expected dataframe columns {df_col}"
    split_index = int(len(df_raw) * train_ratio)
    df_train = df_raw[:split_index]
    df_test = df_raw[split_index:]
    # Select duplicate rows with a boolean mask. The previous code indexed
    # with the *values* of the 'id' column, which only selects the right
    # rows when 'id' happens to equal the dataframe index — it silently
    # picks wrong rows (or raises) for shuffled / re-indexed frames.
    dup_mask = df_train['is_duplicate'] == 1
    q1_train = df_train['question1'][dup_mask]
    q2_train = df_train['question2'][dup_mask]
    q1_test = df_test['question1']
    q2_test = df_test['question2']
    y_test = df_test['is_duplicate']
    return q1_train, q2_train, q1_test, q2_test, y_test


def gen_train_dataset(q1, q2, vocab2id, max_len=128):
    """Pad (and truncate) encoded question pairs to fixed-length LongTensors.

    Args:
        q1, q2: aligned lists of encoded sentences (lists of token ids).
        vocab2id: mapping providing the '<pad>' token id.
        max_len: fixed output length of every tensor.

    Returns:
        list of (q1_tensor, q2_tensor) pairs, each tensor of shape (max_len,).
    """
    pad_id = vocab2id['<pad>']

    def _pad(seq):
        # Truncate first: previously a sequence longer than max_len got a
        # negative pad count, so an over-length tensor slipped through and
        # broke fixed-length batching downstream.
        seq = seq[:max_len]
        return torch.tensor(seq + [pad_id] * (max_len - len(seq)),
                            dtype=torch.long)

    return [(_pad(a), _pad(b)) for a, b in zip(q1, q2)]


def gen_test_dataset(q1, q2, y, vocab2id, max_len=128):
    """Pad encoded test question pairs, keeping labels aligned.

    Pairs where either question exceeds max_len are dropped (the model
    expects fixed-length input). Pairs of length *exactly* max_len are
    kept — the previous strict `< max_len` comparison wrongly discarded
    them even though they fit with zero padding.

    Args:
        q1, q2: aligned lists of encoded sentences (lists of token ids).
        y: labels indexable by position, aligned with q1/q2.
        vocab2id: mapping providing the '<pad>' token id.
        max_len: fixed output length of every tensor (default matches
            gen_train_dataset).

    Returns:
        list of (q1_tensor, q2_tensor, label) triples.
    """
    pad_id = vocab2id['<pad>']
    tensor_list = []
    for i, (a, b) in enumerate(zip(q1, q2)):
        if max(len(a), len(b)) <= max_len:
            t1 = torch.tensor(a + [pad_id] * (max_len - len(a)), dtype=torch.long)
            t2 = torch.tensor(b + [pad_id] * (max_len - len(b)), dtype=torch.long)
            tensor_list.append((t1, t2, y[i]))
    return tensor_list


def qna_data_pipeline(file='data/questions.csv', train_ratio=0.9):
    """End-to-end pipeline: CSV -> vocab + padded train/test tensor datasets.

    Args:
        file: path to a CSV with 'question1', 'question2', 'id',
            'is_duplicate' columns.
        train_ratio: fraction of rows used for training.

    Returns:
        (train_dataset, test_dataset, vocab) where train_dataset is a list
        of (q1, q2) tensor pairs, test_dataset a list of (q1, q2, label)
        triples, and vocab the fitted Vocab instance.
    """
    df_raw = pd.read_csv(file)
    q1_train, q2_train, q1_test, q2_test, y_test = gen_raw_dataset(df_raw, train_ratio)
    # Vocabulary is fitted on the training questions only.
    vocab = Vocab(q1_train, q2_train)
    q1_train_tensor = vocab.encode_dataset(q1_train)
    q2_train_tensor = vocab.encode_dataset(q2_train)
    q1_test_tensor = vocab.encode_dataset(q1_test)
    q2_test_tensor = vocab.encode_dataset(q2_test)
    y_test_tensor = torch.tensor(y_test.values, dtype=torch.long)
    # max_len = smallest power of two strictly greater than the longest
    # training sentence. bit_length() has no upper bound, unlike the old
    # min-over-range(10) search, which raised ValueError (empty sequence)
    # whenever a sentence reached 512+ tokens.
    longest = max(len(x) for x in q1_train_tensor + q2_train_tensor)
    max_len = 2 ** longest.bit_length()
    train_dataset = gen_train_dataset(q1_train_tensor, q2_train_tensor, vocab.vocab2id, max_len)
    test_dataset = gen_test_dataset(q1_test_tensor, q2_test_tensor, y_test_tensor, vocab.vocab2id, max_len)
    return train_dataset, test_dataset, vocab


if __name__ == "__main__":
    # Smoke-run the full pipeline on the default Quora questions CSV
    # (requires data/questions.csv to exist relative to the CWD).
    train_dataset, test_dataset, vocab = qna_data_pipeline(file='data/questions.csv', train_ratio=0.9)
