'''
Produce POS-tagged text, using the POS-tagging results to resolve length
mismatches introduced by tokenization.
'''
import json
import spacy
# Run on CPU; the commented-out lines below previously pinned a specific GPU.
device = 'cpu'
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Transformer-based English pipeline; provides the per-token .tag_ attribute.
nlp = spacy.load('en_core_web_trf')

# Load the text whose spaCy tokenization issues have already been resolved.
# One JSON-encoded string per line — TODO confirm against the producer script.
with open('../no_spacy_pro/train_text.json', 'r', encoding='utf8') as fp:
    text_data = fp.readlines()

# Build the POS-tagged rendering of a text.
def get_text_tag(text):
    """Return *text* with every token rendered as ``token(TAG)``, space-joined.

    Runs the module-level spaCy pipeline ``nlp`` over *text* and annotates
    each token with its fine-grained POS tag (``Token.tag_``).

    Returns an empty string for input that tokenizes to zero tokens
    (the previous implementation raised IndexError on that case).
    """
    doc = nlp(text)
    # str.join is O(n) and naturally handles the empty-document case,
    # unlike repeated string concatenation seeded from doc[0].
    return ' '.join(f'{token.text}({token.tag_})' for token in doc)
if __name__ == '__main__':
    # Tag every corpus line and write one tagged line per input line.
    with open('train_text_tag.txt', 'w', encoding='utf8') as out_file:
        for raw_line in text_data:
            # Each input line is a JSON-encoded string; decode before tagging.
            tagged_line = get_text_tag(json.loads(raw_line))
            out_file.write(tagged_line + '\n')


