'''
Build the sentences obtained after re-tokenizing the pre-tokenized text with spaCy.
'''
import json
import spacy

# Load the spaCy transformer pipeline; only its tokenizer is used by texts().
nlp = spacy.load('en_core_web_trf')

# Load the pre-tokenized text data (one entry per sentence, each with a
# "tokens" list of word strings).
label_path = '../../../dataset/twitter2017/texts/train_text.json'
with open(label_path, 'r', encoding='utf8') as fp:
    text_json_data = json.load(fp)

# Extract just the token lists (comprehension replaces the index loop).
tokens_data = [entry["tokens"] for entry in text_json_data]
labels_data = []  # NOTE(review): unused in this script — kept in case another module imports it.


def texts(data):
    """Re-tokenize one sentence's word list with spaCy and rebuild the sentence.

    Each word is passed through ``nlp.tokenizer``:
      * exactly one sub-token  -> the original word is kept;
      * several sub-tokens     -> only the FIRST sub-token's text is kept, the
        original word is recorded in ``pro_list`` and ``flag`` is set to 1;
      * zero sub-tokens        -> the word is replaced by ``'?'``.
    Words containing ``"http"`` (URLs) are skipped entirely.
    A trailing pair of ``'-'`` tokens is stripped before processing.

    Args:
        data: list of word strings for one sentence.

    Returns:
        tuple ``(sentence, flag, pro_list)``:
            sentence: str — the rebuilt, space-joined sentence;
            flag: int — 1 if any word split into multiple sub-tokens, else 0;
            pro_list: list[str] — the original words that split.
    """
    # BUG FIX: guard the trailing '--' strip so an empty list no longer raises
    # IndexError on data[-1].
    if len(data) >= 2 and data[-1] == '-' and data[-2] == '-':
        data = data[:-2]

    parts = []     # sentence pieces, joined once at the end (avoids quadratic +=)
    flag = 0
    pro_list = []
    for word in data:
        if "http" in word:
            continue  # drop URLs
        word_tokens = nlp.tokenizer(word)
        if len(word_tokens) > 1:
            # BUG FIX: the original only set flag=1 when the splitting word was
            # not the first word of the sentence, so pro_list could be non-empty
            # while flag stayed 0 and the caller silently skipped the mismatch.
            flag = 1
            pro_list.append(word)
            parts.append(word_tokens[0].text)  # keep only the first sub-token
        elif len(word_tokens) == 0:
            parts.append('?')  # empty tokenization: substitute '?'
        else:
            parts.append(word)
    return ' '.join(parts), flag, pro_list

if __name__ == '__main__':
    # For every sentence whose spaCy re-tokenization differed from the original
    # tokens (flag == 1), append three JSON-encoded lines plus a separator:
    # the original sentence, the words that split, and the rebuilt sentence.
    # NOTE(review): mode 'a' accumulates output across runs — confirm intended.
    with open('train.json', 'a', encoding='utf8') as f:
        for data in tokens_data:
            sentence, flag, pro_list = texts(data)
            if flag != 1:
                continue
            original_line = json.dumps(' '.join(data))
            split_words = json.dumps(' '.join(pro_list))
            rebuilt = json.dumps(sentence)
            f.write(original_line + '\n')
            f.write(split_words + '\n')
            f.write(rebuilt + '\n' + "-----------------------------------------" + '\n')