"""
抽取出表示关系语句
"""
from bs4 import BeautifulSoup
import shutil
import re
import os
import csv
import sys
from tqdm import tqdm
# import nltk
# from nltk.tokenize import sent_tokenize
import numpy as np
import spacy
from spacy.matcher import Matcher
import myutils

# nltk.download('punkt')

nlp = spacy.load("en_core_web_md")

# Define token-level POS patterns for the spaCy Matcher

# Each pattern is a sequence of token specs; "OP" gives the quantifier
# ("*" zero-or-more, "+" one-or-more, "?" optional, absent = exactly one).
all_patterns = [
    # Verb group + optional number/adjective/adverb modifiers + optional verb/determiner.
    [
        {"POS": "VERB", "OP": "*"},
        {"POS": "VERB", "OP": "+"},
        {"POS": "NUM", "OP": "*"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "ADV", "OP": "*"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "VERB", "OP": "?"},
        {"POS": "DET", "OP": "?"},
    ],
    # As above, then a particle (e.g. "to") followed by a verb group.
    [
        {"POS": "VERB", "OP": "*"},
        {"POS": "VERB", "OP": "+"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "ADV", "OP": "*"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "VERB", "OP": "?"},
        {"POS": "DET", "OP": "?"},
        {"POS": "PART", "OP": "+"},
        {"POS": "VERB", "OP": "+"},
    ],
    # As above, but with an adposition (preposition) before the trailing verb group.
    [
        {"POS": "VERB", "OP": "*"},
        {"POS": "VERB", "OP": "+"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "ADV", "OP": "*"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "VERB", "OP": "?"},
        {"POS": "DET", "OP": "?"},
        {"POS": "ADP", "OP": "+"},
        {"POS": "VERB", "OP": "+"},
    ],
    # Verb + adverb + optional determiner/adjectives + noun.
    [
        {"POS": "VERB"},
        {"POS": "ADV"},
        {"POS": "DET", "OP": "*"},
        {"POS": "ADJ", "OP": "*"},
        {"POS": "NOUN"},
    ],
    # Verb + adverb + determiner + verb + noun group + verb.
    [
        {"POS": "VERB"},
        {"POS": "ADV"},
        {"POS": "DET"},
        {"POS": "VERB"},
        {"POS": "NOUN", "OP": "*"},
        {"POS": "NOUN"},
        {"POS": "VERB"},
    ],
]

# # 因果
# cause_effect_patterns = [
#     [{"POS":"VERB","OP":"*"}, {"POS":"VERB","OP":"+"}, {"POS":"ADJ","OP":"*"}, {"POS":"ADV","OP":"*"}, {"POS":"ADJ","OP":"*"}, {"POS":"VERB","OP":"+"}, {"POS":"DET","OP":"?"}, {"POS":"PART","OP":"+"}, {"POS":"VERB","OP":"+"}],
#     [{"POS":"VERB"}, {"POS":"ADV"}, {"POS":"DET","OP":"*"}, {"POS":"ADJ","OP":"*"}, {"POS":"NOUN"}],
#     [{"POS":"VERB"}, {"POS":"ADV"}, {"POS":"DET"}, {"POS":"VERB"}, {"POS":"NOUN","OP":"*"}, {"POS":"NOUN"},{"POS":"VERB"}],
# ]
# # 整体-部分
# component_whole_patterns = [
#     [],
#     []
# ]


matcher = Matcher(nlp.vocab)
# Register the POS patterns under one rule name; greedy="LONGEST" keeps only
# the longest span among overlapping matches.
matcher.add("all_patterns", all_patterns, greedy="LONGEST")
# matcher.add("cause_effect", cause_effect_patterns)
# matcher.add("component_whole", component_whole_patterns)

def extract_relationship_sentence(html_path):
    """
    Extract relation-expressing sentences from an HTML page and write them to CSV.

    The visible body text of *html_path* is flattened to a single lower-cased
    line, parsed with spaCy, and scanned with the module-level ``matcher``.
    For every pattern match, the sentence containing the match is written to
    the CSV opened via ``myutils.open_csv("relation_sentences")`` as a row
    ``[matching_pattern, sentence]`` (after a header row).

    Parameters
    ----------
    html_path : str
        Path of the HTML file to process.
    """
    with open(html_path, "r", encoding="utf-8", errors="ignore") as html_doc:
        soup = BeautifulSoup(html_doc, "lxml")
    # Collapse the body text to one lower-cased line for sentence segmentation.
    html_content = soup.body.get_text().replace("\n", " ").lower()

    doc = nlp(html_content)
    # Token span (start, end) and stripped text for every sentence in the doc.
    sent_and_position = [
        ((sent.start, sent.end), str(sent).strip()) for sent in doc.sents
    ]

    matches = matcher(doc)

    relation_sentences_csv = myutils.open_csv("relation_sentences")
    relation_sentences_csv.writerow(["matching_pattern", "sentence"])

    for match_id, start, end in matches:
        # Name of the matched rule (the string registered with matcher.add).
        pattern_name = nlp.vocab.strings[match_id]
        # Locate the sentence that fully contains the matched token span.
        sentence = None
        for (sent_start, sent_end), sent_text in sent_and_position:
            if start >= sent_start and end <= sent_end:
                sentence = sent_text
                break
        if sentence is None:
            # Match crosses a sentence boundary: the original code would have
            # raised NameError or reused a stale sentence here — skip instead.
            continue
        relation_sentences_csv.writerow([pattern_name, sentence])


html_path = "D:/Chrome/Dl/python362/library/exceptions.html"
extract_relationship_sentence(html_path)
