from collections import defaultdict
import pandas as pd
import re
import torch
import config

def load_data(path, sample=False):
    """Read a tab-separated parallel corpus with no header row.

    :param path: path to the TSV file (one sentence pair per line)
    :param sample: when True, keep only the first 500 rows (debug runs)
    :return: pandas DataFrame with integer columns 0, 1, ...
    """
    frame = pd.read_csv(path, sep="\t", header=None)
    return frame.iloc[:500] if sample else frame


class Vocab(object):
    """Word <-> id vocabulary for one language ('en' or 'cn').

    Special-token ids (BOS/EOS/UNK/PAD) are supplied by the caller; regular
    word ids are assigned contiguously after the largest special-token id.
    ``vocab2id`` is a defaultdict(int), so a direct ``[]`` lookup of an unseen
    word returns 0 (id 0 is therefore implicitly reserved).
    """

    def __init__(self, lang, BOS=None, EOS=None, UNK=None, PAD=None, max_len=None, is_pad=None):
        """
        Naive dictionary vocabulary.

        :param lang: [str] 'en' or 'cn' — selects the sentence processor
        :param BOS: [int] id of the begin-of-sentence token, or None if unused
        :param EOS: [int] id of the end-of-sentence token, or None if unused
        :param UNK: [int] id of the unknown-word token, or None if unused
        :param PAD: [int] id of the padding token (required when is_pad)
        :param max_len: [int] max sentence length kept by encode_dataset
        :param is_pad: [bool] right-pad encoded sentences up to max_len
        """
        assert (is_pad and max_len) or not is_pad, "Argument max_len is required for padding"
        assert (is_pad and PAD) or not is_pad, "Argument PAD token is required for padding"
        self.token_str = ["sos", "eos", "unk", "pad"]
        # marker word -> caller-chosen id (None when the token is unused)
        self.token = {name: tid for name, tid in zip(self.token_str, [BOS, EOS, UNK, PAD])}
        self.lang = lang
        self.vocab2id = defaultdict(
            int, {name: tid for name, tid in self.token.items() if tid is not None})
        # BUG FIX: the original also inserted a `None` key for every unset
        # token; only map real ids back to their marker words.
        self.id2vocab = defaultdict(
            str, {tid: name for name, tid in self.token.items() if tid is not None})
        self.vocab_size = len(self.vocab2id)
        self.processer = self.genProcesser()
        self.words = set(self.vocab2id.keys())
        self.is_pad = is_pad
        self.max_len = max_len
        assert len({BOS, EOS, UNK, PAD} - {None}) == self.vocab_size, \
            "BOS,EOS,UNK,PAD should not be identical"

    def genProcesser(self):
        """Return a closure that normalises one raw sentence for ``self.lang``
        and wraps it with 'sos'/'eos' marker words when those tokens are set.

        :raises ValueError: if the language is neither 'en' nor 'cn'
        """
        def wrap(s):
            # add marker words only for tokens the caller actually configured
            sos = self.token_str[0] if self.token[self.token_str[0]] is not None else ""
            eos = self.token_str[1] if self.token[self.token_str[1]] is not None else ""
            return f"{sos} {s} {eos}"

        if self.lang == "en":
            def enProcessor(s):
                s = s.lower()
                s = re.sub(r"([?.!])", r" \1 ", s)       # space out punctuation
                s = re.sub(r'[" "]+', " ", s)            # collapse quotes/space runs
                s = re.sub(r"[^a-zA-Z?.!,¿]+", " ", s)   # drop everything else
                return wrap(s)
            return enProcessor
        elif self.lang == "cn":
            def processer_cn(s):
                """Character-level Chinese tokenisation; could be improved."""
                s = re.sub(r"([？。，！])", r" \1 ", s)
                # insert a space after every CJK char so split() yields characters
                s = re.sub(r"([\u2E80-\u9FFF？。，！])", r"\1 ", s)
                s = re.sub(r'[" "]+', " ", s)
                s = re.sub(r"[^\u2E80-\u9FFF？。，！]+", " ", s)
                return wrap(s)
            return processer_cn
        # BUG FIX: the original `assert "Language do not match"` was always
        # truthy, so an unknown language silently produced processer=None.
        raise ValueError(f"Language does not match: {self.lang!r} (expected 'en' or 'cn')")

    def update(self, *args):
        """
        Update the vocabulary from iterables of raw sentences.

        :param args: one or more iterables of raw sentences
        :return: 0 when no new word was found, otherwise None
        """
        assert all(hasattr(a, '__iter__') for a in args), \
            "Expected iterable arguments"
        seen = set()
        for lines in args:
            for line in lines:
                seen |= set(self.processer(line).split())
        # BUG FIX: the original `if words in self.words` raised TypeError
        # (a set is unhashable); the intended check is a subset test.
        if seen <= self.words:
            return 0
        # Sorted for a reproducible id assignment (set order depends on the
        # per-run string-hash seed).
        new_words = sorted(seen - self.words)
        # BUG FIX: ids previously restarted at 2, colliding with the EOS/UNK
        # ids used by data_process; continue after the largest assigned id.
        start = max((i for i in self.id2vocab if isinstance(i, int)), default=0) + 1
        self.vocab2id.update((w, start + k) for k, w in enumerate(new_words))
        self.id2vocab.update((start + k, w) for k, w in enumerate(new_words))
        self.vocab_size = len(self.vocab2id)
        self.words = set(self.vocab2id.keys())

    def encode_dataset(self, sentence_iter):
        """
        Encode raw sentences into 1-D id tensors, dropping sentences longer
        than ``max_len`` and, when ``is_pad``, right-padding with the PAD id.

        :param sentence_iter: iterable of raw sentences
        :return: list of torch tensors of dtype ``config.dtype``
        """
        assert hasattr(sentence_iter, '__iter__'), \
            "Expected iterable arguments "
        unk = self.vocab2id.get(self.token_str[2], -1)
        assert unk != -1, "Unknown word token is not defined"
        id_lists = [[self.vocab2id.get(w, unk)
                     for w in self.processer(line).split()]
                    for line in sentence_iter]
        id_lists = [ids for ids in id_lists if len(ids) <= self.max_len]
        if self.is_pad:
            # BUG FIX: the original padded *after* tensor creation by adding a
            # Python list to a tensor of a different length (broadcast error);
            # pad the id lists first, then build the tensors once.
            pad = self.vocab2id.get(self.token_str[3])
            id_lists = [ids + [pad] * (self.max_len - len(ids)) for ids in id_lists]
        return [torch.tensor(ids, dtype=config.dtype) for ids in id_lists]


def filter_by_len(df, max_len):
    """Keep only rows whose column-0 and column-1 entries are both strictly
    shorter than ``max_len``.

    :param df: DataFrame whose columns 0 and 1 hold sized objects (strings)
    :param max_len: exclusive length bound applied to both columns
    :return: the filtered DataFrame (original index preserved)
    """
    keep = df[0].map(len).lt(max_len) & df[1].map(len).lt(max_len)
    return df.loc[keep]


def data_process(path):
    """Load a parallel en/cn corpus and build vocabularies plus encoded data.

    :param path: path to the tab-separated corpus (col 0 = en, col 1 = cn)
    :return: (en_vocab, cn_vocab, en_train, cn_train)
    """
    frame = load_data(path, sample=config.debuging)
    frame = filter_by_len(frame, config.max_len)
    en_raw, cn_raw = frame[0], frame[1]
    # BOS, EOS, UNK is fixed to 1,2,3 for NMT
    en_vocab = Vocab('en', 1, 2, 3, max_len=config.max_len)
    cn_vocab = Vocab('cn', 1, 2, 3, max_len=config.max_len)
    for vocab, raw in ((en_vocab, en_raw), (cn_vocab, cn_raw)):
        vocab.update(raw)
    en_train = en_vocab.encode_dataset(en_raw)
    cn_train = cn_vocab.encode_dataset(cn_raw)
    return en_vocab, cn_vocab, en_train, cn_train

