import re
from collections import Counter
import networkx as nx
import spacy
from itertools import combinations
from spacy.matcher import Matcher
from retrieval.process import get_text
from retrieval.rate import RatingSystem

# Map spaCy NER labels to the Chinese display names used in the CLI output.
entity_dict = dict(
    PERSON="人名",
    NORP="政治团体",
    FAC="建筑物",
    ORG="组织机构",
    GPE="地名",
    LOC="地理区域",
    PRODUCT="产品",
    EVENT="事件",
    WORK_OF_ART="艺术作品",
    LAW="法律",
    LANGUAGE="语言",
    DATE="日期",
    TIME="时间",
    PERCENT="百分比",
    MONEY="货币金额",
    QUANTITY="度量衡",
    ORDINAL="序数词",
    CARDINAL="基数词",
)


def spacy_match(txt, pattern=None, describe=''):
    """Run a fixed set of rule-based token patterns over *txt* and print
    every match together with the name of the pattern that produced it.

    Args:
        txt: Raw English text to analyse.
        pattern: Optional extra spaCy token pattern (a list of token specs)
            registered in addition to the built-in ones.
        describe: Matcher label under which *pattern* is registered.
    """
    global nlp  # the pipeline is loaded once in __main__
    matcher = Matcher(nlp.vocab)
    # "to <verb>" — typical phrasing of a stated research purpose.
    research_purpose_pattern = [{"LOWER": "to"}, {"POS": "VERB"}]
    # subject + verb + direct object / attribute — declarative statements.
    method_description_pattern = [{"DEP": {"IN": ["nsubj", "nsubjpass"]}}, {"POS": "VERB"},
                                  {"DEP": {"IN": ["dobj", "attr"]}}]
    # NOTE(review): identical to method_description_pattern, so every hit is
    # reported under both names — differentiate the rules if unintended.
    experimental_results_pattern = [{"DEP": {"IN": ["nsubj", "nsubjpass"]}}, {"POS": "VERB"},
                                    {"DEP": {"IN": ["dobj", "attr"]}}]
    research_objects_pattern = [{"POS": "NOUN"}, {"DEP": "prep"}, {"POS": "NOUN"}]
    keywords_pattern = [{"POS": "ADJ"}, {"POS": "NOUN"}]
    location_pattern = [{"ENT_TYPE": "GPE"}]
    author_pattern = [{"POS": "PROPN"}, {"POS": "PROPN"}]
    time_pattern = [{"ENT_TYPE": "DATE"}]

    # Register every built-in pattern with the Matcher.
    matcher.add("ResearchPurpose", [research_purpose_pattern])
    matcher.add("MethodDescription", [method_description_pattern])
    matcher.add("ExperimentalResults", [experimental_results_pattern])
    matcher.add("ResearchObjects", [research_objects_pattern])
    matcher.add("Keywords", [keywords_pattern])
    matcher.add("Location", [location_pattern])
    matcher.add("Author", [author_pattern])
    matcher.add("Time", [time_pattern])

    if pattern is not None:
        matcher.add(describe, [pattern])

    mdoc = nlp(txt)
    matches = matcher(mdoc)

    for match_id, start, end in matches:
        # BUG FIX: slice the locally parsed document (mdoc), not the
        # unrelated module-level `doc` created inside __main__ — the old
        # code printed spans from the wrong document (or raised NameError).
        matched_span = mdoc[start:end]
        pattern_name = nlp.vocab.strings[match_id]  # resolve pattern label
        print("Matched Pattern: ", pattern_name)
        print("Text: ", matched_span.text)
        print()


def re_match(txt, pattern=r'\d{1,2}\s+\w+\s+\d{4}'):
    """Print every regex match found in *txt*, prefixed with ``re: ``.

    The default pattern picks out day/month/year dates such as
    ``12 March 2020``.
    """
    for hit in re.finditer(pattern, txt):
        print('re: ' + hit.group(0), end='')


def textrank(doc):
    """Score the tokens of a spaCy doc with PageRank and print the top 4.

    Punctuation and ADP/DET/SPACE tokens are dropped; every remaining
    (text, pos) pair becomes a node, and an edge is added between every
    pair of nodes before running PageRank.
    """
    skip_pos = {"ADP", "DET", "SPACE"}
    nodes = [
        (tok.text, tok.pos_)
        for tok in doc
        if not tok.is_punct and tok.pos_ not in skip_pos
    ]

    graph = nx.Graph()
    graph.add_nodes_from(nodes)
    # Connect every pair of retained tokens (complete co-occurrence graph).
    graph.add_edges_from(combinations(nodes, 2))

    ranked = sorted(nx.pagerank(graph).items(), key=lambda kv: kv[1], reverse=True)
    # Report the 4 highest-scoring keywords.
    for rank, (word, score) in enumerate(ranked[:4], 1):
        print("Rank:", rank)
        print("Keyword:", word)
        print("Score:", score)
        print()


if __name__ == '__main__':
    texts = get_text()
    nlp = spacy.load('en_core_web_sm')

    while True:
        paper_id = int(input('待抽取文档编号（1-500）：'))
        if paper_id == 0:
            break  # 0 ends the session cleanly (exit status 0)

        for index, txt in enumerate(texts):
            if paper_id != index:
                continue
            doc = nlp(txt)
            # Basic information extraction: the 5 most frequent entity labels.
            ids = [(e.text, e.label_) for e in doc.ents]
            label_counts = Counter(label for _, label in ids)
            top_labels = sorted(label_counts, key=lambda x: label_counts[x], reverse=True)[:5]
            print(top_labels)
            num = 0  # total entities printed under the top labels
            # top_labels is already capped at 5 entries, so iterate directly
            # (the old `length = 5 if ... else ...` recomputation was redundant).
            for label in top_labels:
                # .get() guards against labels missing from entity_dict,
                # which previously raised KeyError and aborted the loop.
                print(entity_dict.get(label, label), end=': ')
                entities = [text for text, lab in ids if lab == label]
                num += len(entities)
                print(entities)
            print(f'实体总数量：{len(ids)}，检测出实体数量：{num}')
            tp = int(input('抽取出实体中，准确的数量：'))
            fp = int(input('未抽取实体中，准确的篇数：'))
            recall, precision, f1 = RatingSystem.scores(num, tp, fp, len(ids))
            print(f'准确率：{precision}，召回率：{recall}，f1-score：{f1}\n')
            score = float(input('请为此次搜索满意度打分[0-10]: '))
            RatingSystem.save('ratings.txt', score, f'file_{index}')

            # --------- extras -------------

            # Extract article keywords:
            # textrank(doc)

            # Regex-based matching; the default pattern finds dates:
            # re_match(txt)

            print('\n')


        # 234\116\456\347\481