from src.LoadData.dataLoader import dataclean, save_dir, Voc, getWordIndex
from src.Main.config import *
import os


XHJ_PATH = "../../Datasets/RawData/xhj_seg"
data_name = "xiaohuangji"


def getPairs(data_path):
    """Parse the raw xiaohuangji corpus into (question, answer) token pairs.

    The raw file alternates conversation markers (``"E"``) with utterance
    lines (``"M <text>"``). Within a conversation, consecutive M-lines are
    grouped two at a time; a pair is kept only when both sides are at least
    2 tokens long and strictly shorter than MAX_LENGTH.

    :param data_path: path to the raw xhj corpus file (utf-8 encoded)
    :return: list of ``[question_tokens, answer_tokens]`` pairs
    """
    xhj_pairs = []
    with open(data_path, 'r', encoding='utf-8') as f:
        xhj_pair = [[], []]
        i = 0
        # Iterate the file lazily instead of readlines() to avoid loading
        # the whole corpus into memory at once.
        for line in f:
            line = line.strip()
            if line == "E":
                # New conversation: drop any dangling half-pair so a lone
                # utterance never gets paired with the next conversation's
                # first line (the original code left `i` unreset here).
                i = 0
                xhj_pair = [[], []]
                continue
            if not line:
                # Skip blank lines defensively; they are not utterances.
                continue
            xhj_pair[i] = dataclean(line.replace("M ", "").strip())
            i += 1
            if i == 2:
                len_q = len(xhj_pair[0])
                len_a = len(xhj_pair[1])
                # Keep only pairs where both sides fit the model's limits.
                if 2 <= len_q < MAX_LENGTH and 2 <= len_a < MAX_LENGTH:
                    xhj_pairs.append(xhj_pair)
                i = 0
                xhj_pair = [[], []]
    return xhj_pairs


if __name__ == "__main__":

    print("-----loading pairs----")
    pairs = getPairs(XHJ_PATH)
    print(len(pairs))
    print("-----loading voc------")
    voc = Voc(data_name)
    for pair in pairs:
        voc.addSentence(pair[0])
        voc.addSentence(pair[1])
    voc.trim(min_count=3)
    print(voc.num_words)
    print("-----loading index pairs------")
    pairs = getWordIndex(voc, pairs)
    # print(pairs)
    directory = os.path.join(save_dir, 'training_data', data_name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    torch.save(voc, os.path.join(directory, '{!s}.tar'.format('voc_xhj')))
    torch.save(pairs, os.path.join(directory, '{!s}.tar'.format('pairs_train_xhj')))
