# Project-local configuration (provides train_path / vocab_path).
from P03_NER.LSTM_CRF.config import Config
# Module-level singleton read by build_data() below.
config = Config()

# Build the training dataset from the raw tagged corpus file.
def build_data():
    """Read config.train_path (one "word\\tlabel" pair per line), split the
    token stream into sentences at end-of-sentence punctuation, and build
    the vocabulary.

    Returns:
        datas: list of [words, labels] pairs, one per sentence.
        word2index: dict mapping each vocabulary word to its index
            (0 = 'PAD', 1 = 'UNK', then first-seen order).

    Side effect: writes the vocabulary, one word per line, to
    config.vocab_path via write_file().
    """
    datas, sample_x, sample_y = [], [], []
    vocab_list = ['PAD', 'UNK']
    # Companion set for O(1) membership tests; the original tested
    # `word not in vocab_list`, an O(n) list scan per token (O(n^2) total).
    seen = set(vocab_list)
    # Read train.txt; iterate the file lazily instead of readlines().
    with open(file=config.train_path,
              mode='r',
              encoding='utf-8') as fr:
        for raw_line in fr:
            parts = raw_line.rstrip().split('\t')
            if len(parts) > 1:
                word = parts[0]
                label = parts[-1]
                sample_x.append(word)
                sample_y.append(label)
                # Grow the vocabulary, preserving first-seen order.
                if word not in seen:
                    seen.add(word)
                    vocab_list.append(word)
                # Sentence boundary: end-of-sentence punctuation.
                if word in ['。', '?', '!', '！', '？']:
                    datas.append([sample_x, sample_y])
                    sample_x = []
                    sample_y = []
    # Flush a trailing sentence that lacks final punctuation — the original
    # silently dropped it, losing training data.
    if sample_x:
        datas.append([sample_x, sample_y])
    # Persist the vocabulary file.
    write_file(vocab_list, config.vocab_path)

    # word -> index mapping over the ordered vocabulary.
    word2index = {word: idx for idx, word in enumerate(vocab_list)}
    return datas, word2index

# Persist a vocabulary list: one entry per line, UTF-8 encoded.
def write_file(words, words_path):
    """Write each item of *words* to *words_path*, newline-separated."""
    content = '\n'.join(words)
    with open(file=words_path, mode='w', encoding='utf-8') as out:
        out.write(content)

# Script entry point: build the dataset and report its sizes.
if __name__ == '__main__':
    dataset, vocab_index = build_data()
    print(f'datas-->{len(dataset)}')
    print(f'word2index-->{len(vocab_index)}')
