import spacy
from spacy import displacy
from transformers import BertTokenizer
import json

# Load spaCy's English transformer pipeline; its parser supplies the
# `doc.noun_chunks` used by extract_phrase below.
nlp = spacy.load('en_core_web_trf')

# 读取数据集
def select_data (data):
    """Load the three parallel data files for the given split.

    Parameters
    ----------
    data : str
        Split name, e.g. 'train', 'val' or 'test'.

    Returns
    -------
    tuple
        (tagged sentences, gold noun phrases, preprocessed sentences),
        each a list of raw lines (newlines preserved) read from disk.
    """
    def _read_lines(path):
        # Read all lines from a UTF-8 text file, newlines kept.
        with open(path, 'r', encoding='utf8') as fp:
            return fp.readlines()

    # POS-tagged sentences (used to validate the extraction results)
    text_tag_datas = _read_lines('text/text_tag/{}_text_tag.txt'.format(data))

    # Gold noun phrases without tags (used to validate the extraction results)
    ner_datas = _read_lines('ner/ner/{}_ner.json'.format(data))

    # Untagged, preprocessed sentences (the input for noun-phrase extraction)
    text_datas = _read_lines('text/text/{}_text.json'.format(data))

    return text_tag_datas, ner_datas, text_datas

'''
Exact match
train : 7381 / 3468
val : 2483 / 1199
test : 2480 / 1167
Containment match
train : 7381 / 6521
val : 2483 / 2199
test : 2480 / 2193
current best f1: 0.8600201409869084, epoch: 26
'''

def extract_phrase(sentence):
    """Run the spaCy pipeline over *sentence* and collect its noun chunks.

    Returns a two-element list: the chunk texts and, in parallel,
    each chunk's start token index within the parsed document.
    """
    doc = nlp(sentence)
    chunks = list(doc.noun_chunks)
    texts = [chunk.text for chunk in chunks]
    starts = [chunk.start for chunk in chunks]
    return [texts, starts]


if __name__ == '__main__':

    # For each split, score extracted noun phrases against the gold ones:
    # a gold phrase counts as covered when some extracted phrase contains it.
    for data in ['train', 'val', 'test']:
        text_tag_datas, ner_datas, text_datas = select_data(data)
        ners_len, inclu_correct, ext_correct = 0, 0, 0
        # with open('{}.txt'.format(data), 'a', encoding='utf8') as f3:
        for idx, raw_line in enumerate(text_datas):
            sentence = json.loads(raw_line)
            ners = json.loads(ner_datas[idx])
            ners_len += len(ners)                     # total number of gold noun phrases

            phrases = extract_phrase(sentence)[0]     # extracted noun phrases

            # ext_correct += len(list(set(ners) & set(phrases)))       # count of exact matches
            # count each gold phrase at most once (containment criterion)
            for ner in ners:
                if any(ner in phrase for phrase in phrases):
                    inclu_correct += 1
        print(f"{data} : {ners_len} / {inclu_correct}")
