import pandas as pd
from itertools import chain
from tqdm import tqdm
import numpy as np
import random
import os
import pickle
import re
import jieba
import jieba.posseg as pseg
from args import Args

class TextData:
    """Build and cache a word-level vocabulary and id-encoded training
    samples from a tab-separated Chinese conversation corpus.

    Pipeline (driven by __init__):
      1. load_data             - read and tokenize the raw pairs (skipped if
                                 both cache files already exist)
      2. build_word_dict       - build or load word<->id dictionaries
      3. generate_conversations- encode pairs to ids, filter, build or load
                                 ``train_samples``

    Expected ``args`` attributes: ``conv_path``, ``sr_word_id_path``,
    ``train_samples_path``, ``vacab_filter`` (frequency threshold; the
    misspelling is kept for compatibility with the Args class), and
    ``maxLength``.
    """

    def __init__(self, args):
        self.args = args

        # word -> id and id -> word mappings (pandas Series once built).
        self.sr_word2id = None
        self.sr_id2word = None
        self.sr_line_id = None

        self.train_samples = []  # [[first_conv_ids, second_conv_ids], ...]

        # Special tokens occupy the reserved ids 0..3.
        self.padToken = "<pad>"  # Padding
        self.goToken = "<go>"  # Start of sequence
        self.eosToken = "<eos>"  # End of sequence
        self.unknownToken = "<unk>"  # Word dropped from vocabulary
        self.numToken = 4  # number of special tokens above

        self.load_data()
        self.build_word_dict()
        self.generate_conversations()

        self.vocab_size = len(self.sr_id2word)

    def load_data(self):
        """Read the raw conversation pairs and tokenize both columns.

        Only runs when at least one cached artifact (vocabulary pickle or
        training-sample pickle) is missing; otherwise the raw corpus is not
        needed at all.
        """
        if not os.path.exists(self.args.sr_word_id_path) or not os.path.exists(self.args.train_samples_path):
            print("开始读取数据")
            self.conversations = pd.read_csv(self.args.conv_path, nrows=200000,
                                             names=["first_conv", "second_conv"],
                                             sep='\t', header=None)

            # Tokenize both sides of every pair in place (lists of words).
            self.conversations.first_conv = self.conversations.first_conv.apply(self.word_tokenizer)
            self.conversations.second_conv = self.conversations.second_conv.apply(self.word_tokenizer)
            print("数据读取完毕")

    def build_word_dict(self):
        """Build the word2id/id2word dictionaries, or load them from cache.

        Words whose corpus frequency is <= args.vacab_filter are dropped;
        they will later map to the <unk> id. Regular words are assigned ids
        starting right after the 4 reserved special-token ids.
        """
        if not os.path.exists(self.args.sr_word_id_path):
            print("开始构建词典")
            words = pd.concat([self.conversations.first_conv, self.conversations.second_conv],
                              ignore_index=True)
            # Flatten the per-sentence token lists into one big word list.
            words = list(chain(*words.values))

            sr_words_count = pd.Series(words).value_counts()
            # value_counts is sorted descending, so the first N entries are
            # exactly the words above the frequency threshold.
            sr_words_size = np.where(sr_words_count.values > self.args.vacab_filter)[0].size
            sr_words_index = sr_words_count.index[0:sr_words_size]

            self.sr_word2id = pd.Series(range(self.numToken, self.numToken + sr_words_size),
                                        index=sr_words_index)
            self.sr_id2word = pd.Series(sr_words_index,
                                        index=range(self.numToken, self.numToken + sr_words_size))
            self.sr_word2id[self.padToken] = 0
            self.sr_word2id[self.goToken] = 1
            self.sr_word2id[self.eosToken] = 2
            self.sr_word2id[self.unknownToken] = 3
            self.sr_id2word[0] = self.padToken
            self.sr_id2word[1] = self.goToken
            self.sr_id2word[2] = self.eosToken
            self.sr_id2word[3] = self.unknownToken
            print("词典构建完毕")
            with open(self.args.sr_word_id_path, 'wb') as handle:
                data = {
                    'word2id': self.sr_word2id,
                    'id2word': self.sr_id2word,
                }
                pickle.dump(data, handle, -1)
        else:
            print("从{}载入词典".format(self.args.sr_word_id_path))
            with open(self.args.sr_word_id_path, 'rb') as handle:
                data = pickle.load(handle)
                self.sr_word2id = data['word2id']
                self.sr_id2word = data['id2word']

    def word_tokenizer(self, sentence):
        """Clean a raw sentence and segment it with jieba.

        Bracketed spans (【…】, （…）, 「…」) and hashtag spans (#…#) are
        removed wholesale; everything except CJK characters and the basic
        Chinese punctuation ，。！？… is stripped. Returns a list of tokens.
        """
        # Fix: the original character class was written
        # [^\u4e00-\u9fa5|，|。|！|？|……] — inside [...] the '|' is a literal,
        # so pipe characters were accidentally preserved. Raw string avoids
        # invalid-escape warnings.
        rerule = r'【.+】|#.+#|（.+）|「.+」|[^\u4e00-\u9fa5，。！？…]'
        sentence = re.sub(rerule, "", sentence)
        # jieba falls back to its default mode when paddle is unavailable.
        words = pseg.cut(sentence, use_paddle=True)
        return [word for word, flag in words]

    def replace_word_with_id(self, conv):
        """Map a list of words to their vocabulary ids (unknowns -> <unk>)."""
        return list(map(self.get_word_id, conv))

    def get_word_id(self, word):
        """Return the id for ``word``, or the <unk> id if out of vocabulary."""
        if word in self.sr_word2id:
            return self.sr_word2id[word]
        else:
            return self.sr_word2id[self.unknownToken]

    def get_id_word(self, id):
        """Return the word for ``id``, or the <unk> token if unknown."""
        if id in self.sr_id2word:
            return self.sr_id2word[id]
        else:
            return self.unknownToken

    def filter_conversations(self, first_conv, second_conv):
        """Return True when the id-encoded pair is a usable training sample.

        A pair is rejected when either side exceeds args.maxLength, or when
        the target (second_conv) contains any <unk> id.
        """
        valid = True
        valid &= len(first_conv) <= self.args.maxLength
        valid &= len(second_conv) <= self.args.maxLength
        valid &= second_conv.count(self.sr_word2id[self.unknownToken]) == 0

        return valid

    def generate_conversations(self):
        """Encode every conversation pair to ids, filter, and cache them."""
        if not os.path.exists(self.args.train_samples_path):
            print("开始生成训练样本")
            # Fix: the original looped over range(len(...) - 1), silently
            # dropping the final conversation pair.
            for line_id in tqdm(range(len(self.conversations.first_conv.values)), ncols=10):

                first_conv = self.replace_word_with_id(self.conversations.first_conv[line_id])
                second_conv = self.replace_word_with_id(self.conversations.second_conv[line_id])

                if self.filter_conversations(first_conv, second_conv):
                    self.train_samples.append([first_conv, second_conv])

            print("生成训练样本结束")
            with open(self.args.train_samples_path, 'wb') as handle:
                data = {
                    'train_samples': self.train_samples
                }
                pickle.dump(data, handle, -1)
        else:
            with open(self.args.train_samples_path, 'rb') as handle:
                data = pickle.load(handle)
                self.train_samples = data['train_samples']
            print("从{}导入训练样本".format(self.args.train_samples_path))

    def sen2enco(self, sentence):
        """Tokenize a raw sentence and encode it as [<go>] + ids + [<eos>]."""
        words = self.word_tokenizer(sentence)
        # Use the declared special-token ids instead of magic numbers 1/2.
        return [self.sr_word2id[self.goToken]] + self.replace_word_with_id(words) + [self.sr_word2id[self.eosToken]]


if __name__ == '__main__':
    # Script entry point: build (or load cached) vocabulary and samples.
    cli_args = Args()
    text_data = TextData(cli_args)