import sys

# Make the project package importable when this file is run directly.
# NOTE(review): hard-coded absolute Windows path — breaks on any other
# machine; prefer a relative path or proper packaging.
sys.path.append(r'D:\mydata\heima\heimaPY\KG\LSTM_CRF')

# Wildcard import is assumed to provide `Config`; it may pull in other
# names too — TODO confirm and narrow to `from config import Config`.
from config import *
# Global configuration object (supplies train_path / vocab_path below).
config = Config()


def build_data():
    """
    Build the training dataset from train.txt.

    Each line of train.txt is expected to hold "word<TAB>tag"; sentences
    are delimited by sentence-ending punctuation. As a side effect the
    vocabulary is written to config.vocab_path.

    :return: (datas, word2id) — datas is a list of [words, tags] sample
             pairs; word2id maps each vocabulary word to its index.
    """
    with open(config.train_path, mode='r', encoding='utf-8') as ft:
        datas, vocab_list = parse_train_lines(ft)

    # word -> index mapping, preserving first-seen order
    word2id = {word: index for index, word in enumerate(vocab_list)}

    # persist the vocabulary
    write_file(vocab_list, config.vocab_path)

    return datas, word2id


def parse_train_lines(lines):
    """
    Parse an iterable of "word<TAB>tag" lines into sentence samples and a
    vocabulary. (Internal helper for build_data, split out so the parsing
    logic can be tested without file I/O.)

    :param lines: iterable of raw lines (e.g. an open file object)
    :return: (datas, vocab_list) — datas is a list of [sample_x, sample_y]
             pairs; vocab_list starts with the special tokens PAD and UNK.
    """
    datas = []
    sample_x = []
    sample_y = []
    vocab_list = ["PAD", "UNK"]
    # Parallel set for O(1) membership tests; the original scanned
    # vocab_list (a list) per word, which is O(n^2) over the corpus.
    seen = set(vocab_list)

    for line in lines:
        parts = line.strip().split('\t')
        # Skip malformed lines: must be exactly "word<TAB>tag" with a
        # non-empty word. (split() always yields >= 1 element, so the
        # original `if not line` check was dead code.)
        if len(parts) != 2 or not parts[0]:
            continue
        word, tag = parts
        sample_x.append(word)
        sample_y.append(tag)
        if word not in seen:
            seen.add(word)
            vocab_list.append(word)
        # Sentence-ending punctuation closes the current sample.
        # (Original list had '!' twice; full-width '！' was clearly
        # intended to mirror the half/full-width '?'/'？' pairing.)
        if word in {'?', '!', '？', '。', '！'}:
            datas.append([sample_x, sample_y])
            sample_x = []
            sample_y = []

    # Keep a trailing sentence that lacks final punctuation instead of
    # silently dropping it.
    if sample_x:
        datas.append([sample_x, sample_y])

    return datas, vocab_list


def write_file(vocab_list, file_path):
    """
    Persist the vocabulary to a text file, one entry per line
    (e.g. LSTM_CRF/vocab/vocab.txt).

    :param vocab_list: list of vocabulary words to save
    :param file_path: destination path for the vocabulary file
    :return: None
    """
    content = '\n'.join(vocab_list)
    with open(file_path, mode='w', encoding='utf-8') as out:
        out.write(content)


if __name__ == '__main__':
    # Quick manual check: build the dataset and dump the results.
    samples, vocab_index = build_data()
    print(samples)
    print(vocab_index)
