import json
import os
import re
from collections import Counter
import torch

PAD_token = 0  # index of the padding token
SOS_token = 1  # index of the start-of-sentence token
EOS_token = 2  # index of the end-of-sentence token

# Input file (raw LCCC dialogue JSON) and output directory for processed tensors
RawDataPath = "../../Datasets/RawData/LCCC-base-split/LCCC-base_train.json"
# RawDataPath = "../../Datasets/RawData/word-debug2.json"
data_name = os.path.split(RawDataPath)[-1].split('.')[0]  # dataset name = filename without extension
save_dir = "../../Datasets/FinalData"
from src.Main.config import MAX_LENGTH


class Voc:
    """Vocabulary mapping between words and integer indices.

    Indices 0-2 are reserved for the PAD/SOS/EOS special tokens; regular
    words are assigned consecutive indices starting at 3.
    """

    def __init__(self, name):
        self.name = name
        self.trimmed = False  # guard so trim() is only applied once
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count SOS, EOS, PAD

    def addSentence(self, sentence):
        """Add every space-separated token of `sentence` to the vocabulary."""
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        """Register `word`: assign a fresh index on first sight, else bump its count."""
        if word not in self.word2index:
            self.word2index[word] = self.num_words
            self.word2count[word] = 1
            self.index2word[self.num_words] = word
            self.num_words += 1
        else:
            self.word2count[word] += 1

    def trim(self, min_count):
        """Drop tokens seen fewer than `min_count` times and re-index the rest.

        No-op if already trimmed. After rebuilding, word counts are
        meaningless (every surviving word restarts at 1).
        """
        if self.trimmed:
            return
        self.trimmed = True

        keep_words = [word for word, count in self.word2count.items()
                      if count >= min_count]

        total = len(self.word2index)
        # Guard the ratio report: the original divided unconditionally and
        # raised ZeroDivisionError when trimming an empty vocabulary.
        if total:
            print('keep_words {} / {} = {:.4f}'.format(
                len(keep_words), total, len(keep_words) / total
            ))

        # Rebuild the dictionaries from scratch with only the kept words.
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count default tokens
        for word in keep_words:
            self.addWord(word)


def dataopen(RawDataPath):
    """Read the whole text file at `RawDataPath` (UTF-8) and return its contents.

    Uses a context manager so the handle is always closed; the original
    opened the file and never closed it (resource leak).
    """
    with open(RawDataPath, 'r', encoding='utf-8') as file:
        return file.read()


# 数据清洗,只保留汉字和空格
def dataclean(file):
    """Clean raw dialogue text: keep only CJK characters and spaces.

    Three passes: strip terminal punctuation (with any leading spaces),
    turn every remaining non-CJK character into a '/' placeholder, then
    collapse each run of slashes into a single space separator.
    """
    # Remove (optionally space-prefixed) sentence-final punctuation marks.
    text = re.sub('[\u0020]*[!?。！？…]', '', file)
    # Replace every character outside the CJK Unified Ideographs range.
    text = re.sub('[^\u4e00-\u9fa5]', '/', text)
    # A run of one or more slashes becomes a single space.
    text = re.sub('[\u002f]*[\u002f]', ' ', text)
    return text


def getWordIndex(voc, pairs):
    """Convert (query, reply) word pairs to fixed-length index sequences.

    Words missing from `voc` (e.g. removed by trim()) are silently dropped.
    Each surviving sequence gets an EOS token and is padded with PAD up to
    exactly MAX_LENGTH. Pairs whose query or reply is shorter than 2 tokens
    or too long to fit EOS within MAX_LENGTH are discarded.
    """
    word_index_pairs = []
    for pair in pairs:
        query = [voc.word2index[w] for w in pair[0].split(" ") if w in voc.word2index]
        reply = [voc.word2index[w] for w in pair[1].split(" ") if w in voc.word2index]
        # Upper bound is MAX_LENGTH - 1 so that appending EOS never exceeds
        # MAX_LENGTH. (The original `> MAX_LENGTH` filter let a sequence of
        # exactly MAX_LENGTH tokens grow to MAX_LENGTH + 1 after EOS,
        # producing inconsistent sequence lengths.)
        if not (2 <= len(query) <= MAX_LENGTH - 1):
            continue
        if not (2 <= len(reply) <= MAX_LENGTH - 1):
            continue
        query.append(EOS_token)
        reply.append(EOS_token)
        query.extend([PAD_token] * (MAX_LENGTH - len(query)))
        reply.extend([PAD_token] * (MAX_LENGTH - len(reply)))
        word_index_pairs.append([query, reply])
    return word_index_pairs


def getPairs(path):
    """Load a dialogue JSON file and build cleaned (query, reply) pairs.

    Every adjacent pair of turns in a dialogue becomes one training pair.
    Pairs are kept only when both sides have at least 2 tokens and fewer
    than MAX_LENGTH - 1 tokens after cleaning.
    """
    with open(path, "r", encoding='utf-8') as f:
        dialogues = json.load(f)
    pairs = list()
    for dialogue in dialogues:
        # Slide a window of two consecutive turns over the dialogue.
        for left, right in zip(dialogue, dialogue[1:]):
            query = dataclean(left)
            reply = dataclean(right)
            query_len = len(query.split(" "))
            reply_len = len(reply.split(" "))
            keep = (2 <= query_len < MAX_LENGTH - 1) and (2 <= reply_len < MAX_LENGTH - 1)
            if keep:
                pairs.append([query, reply])
    return pairs


#
if __name__ == "__main__":
    # End-to-end preprocessing: raw JSON -> cleaned pairs -> trimmed vocab
    # -> padded index pairs, then persist both with torch.save.
    print("-----loading pairs----")
    pairs = getPairs(RawDataPath)
    print(len(pairs))
    print("-----loading voc------")
    voc = Voc(data_name)
    for pair in pairs:
        voc.addSentence(pair[0])
        voc.addSentence(pair[1])
    # Drop words seen fewer than 5 times across the corpus.
    voc.trim(min_count=5)
    # print(voc.num_words)
    print("-----loading index pairs------")
    pairs = getWordIndex(voc, pairs)
    # NOTE(review): this prints the ENTIRE processed dataset to stdout —
    # looks like leftover debugging; consider removing for the full corpus.
    print(pairs)
    # print(voc.word2index)
    directory = os.path.join(save_dir, 'training_data', data_name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Persist the Voc instance (pickled) and the index pairs for training.
    torch.save(voc, os.path.join(directory, '{!s}.tar'.format('voc')))
    torch.save(pairs, os.path.join(directory, '{!s}.tar'.format('pairs_train')))

