import re
import torch
import json
from dataLoader2 import Voc, getWordIndex
from src.Main.config import VOC_PATH, MAX_LENGTH
import os
import jieba

TEST_DATA_PATH = "../../Datasets/RawData/LCCC-base-split/LCCC-base_test.json"
TEST_DATA_OUT_PATH = "../../Datasets/MidData/"
# Dataset name = input file name without its extension ("LCCC-base_test").
# BUG FIX: this was previously derived from TEST_DATA_OUT_PATH, which ends
# with "/", so os.path.split(...)[-1] was "" and data_name was always empty —
# the output directory then lost its dataset-specific component.
data_name = os.path.split(TEST_DATA_PATH)[-1].split('.')[0]
save_dir = "../../Datasets/FinalData"


# Data cleaning: keep only Chinese characters and spaces.
def dataclean(file):
    """Normalize one raw utterance so it contains only CJK characters and spaces.

    Three passes, in order:
      1. Drop sentence-terminal punctuation (!?。！？…) together with any
         ASCII spaces immediately before it.
      2. Replace every remaining non-Chinese character (outside
         U+4E00..U+9FA5) with a '/' placeholder.
      3. Collapse each run of '/' into a single space.

    Args:
        file: raw utterance text.

    Returns:
        The cleaned string; may contain leading/trailing spaces where
        non-Chinese runs sat at the edges.
    """
    terminal_punct = re.compile('[\u0020]*[!?。！？…]')
    non_chinese = re.compile('[^\u4e00-\u9fa5]')
    slash_run = re.compile('[\u002f]*[\u002f]')

    text = terminal_punct.sub('', file)
    text = non_chinese.sub('/', text)
    return slash_run.sub(' ', text)


def getPairs(path):
    """Load an LCCC-style dialogue JSON file and build (query, reply) pairs.

    Each dialogue is a list of utterances; every pair of adjacent utterances
    becomes one training pair. Utterances are cleaned with ``dataclean``,
    stripped of spaces, then word-segmented with jieba (tokens joined by
    single spaces). Pairs are kept only when both sides have a token count
    in [2, MAX_LENGTH - 2].

    Args:
        path: path to the JSON file (a list of dialogues, each a list of
            utterance strings).

    Returns:
        list of [query, reply] token-joined string pairs.
    """
    with open(path, "r", encoding='utf-8') as f:
        data_list = json.load(f)

    pairs = []
    for dialogue in data_list:
        # Adjacent-utterance sliding window: (u[i], u[i+1]).
        for i in range(len(dialogue) - 1):
            query = _segment(dialogue[i])
            reply = _segment(dialogue[i + 1])
            # NOTE: a per-pair debug print was removed here; it flooded
            # stdout for large datasets.
            query_len = len(query.split(" "))
            reply_len = len(reply.split(" "))
            if (2 <= query_len < MAX_LENGTH - 1
                    and 2 <= reply_len < MAX_LENGTH - 1):
                pairs.append([query, reply])
    return pairs


def _segment(sentence):
    """Clean one utterance and return its jieba tokens joined by spaces."""
    cleaned = dataclean(sentence).replace(" ", "")
    # " ".join avoids the quadratic cost of repeated string concatenation.
    return " ".join(jieba.cut(cleaned))


if __name__ == '__main__':
    # Build cleaned + segmented test pairs, map tokens to vocabulary
    # indices, and persist the result for later evaluation.
    raw_pairs = getPairs(TEST_DATA_PATH)
    voc = torch.load(VOC_PATH)
    indexed_pairs = getWordIndex(voc, raw_pairs)

    out_dir = os.path.join(save_dir, 'test_data', data_name)
    os.makedirs(out_dir, exist_ok=True)

    torch.save(indexed_pairs, os.path.join(out_dir, 'pairs_test.tar'))
