import copy
import json

import bs4
import requests
from bs4 import BeautifulSoup

import settings
from model import DBSession, SensitiveWord


class WebContent:
    """Extract sensitive-keyword matches and their surrounding paragraphs
    from a fetched web page.

    The keyword list is compiled into a nested-dict trie (a simple DFA),
    so the page text is scanned in a single pass.
    """

    # HTML character-entity lookup table; kept public for backward
    # compatibility (it was only used by removed legacy tag-filter code).
    CHAR_ENTITIES = {'nbsp': ' ', '160': ' ',
                     'lt': '<', '60': '<',
                     'gt': '>', '62': '>',
                     'amp': '&', '38': '&',
                     'quot': '"', '34': '"', }

    # Class-level defaults so both attributes exist (as empty strings)
    # before filter_context() has been called.
    unrepeated_word = ''
    context = ''

    def __init__(self, html: 'bs4.BeautifulSoup', word_list: list):
        """Capture the parsed page and the sensitive-word rows.

        :param html: parsed page (a BeautifulSoup soup object)
        :param word_list: DB rows; each element exposes a ``.words`` string
        """
        self.html = html.text  # page text with all markup stripped by bs4
        # NOTE(review): raises AttributeError on pages without a <title> tag
        self.title = html.title.string

        # Sensitive-word rows straight from the database query.
        self.word_list = word_list

        # Pretty-printed snapshot of the original markup.
        self.snapshot = html.prettify()

        # Lazily-built trie cache (see get_sens()).
        self._trie = None

    # Build the keyword storage structure from the DB rows.
    def get_sens(self) -> dict:
        """Build (once) and return the keyword trie.

        Each node is a dict mapping the next character to its child node;
        every node carries an ``'is_end'`` flag marking a complete keyword.
        The result is cached after the first call.
        """
        cached = getattr(self, '_trie', None)  # tolerate instances built without __init__
        if cached is not None:
            return cached

        trie = {}
        for keyword in (obj.words for obj in self.word_list):
            node = trie
            for char in keyword:
                node = node.setdefault(char, {'is_end': False})
            if keyword:  # ignore empty keyword rows
                node['is_end'] = True

        self._trie = trie
        return trie

    # Match sensitive words against the page text.
    def match_sens(self) -> list:
        """Single-pass scan of the page text for every keyword occurrence.

        :return: list of ``{'start': position, 'match': word}`` dicts,
            including overlapping hits (a keyword that is a prefix of a
            longer keyword reports both).
        """
        trie = self.get_sens()  # hoisted: the original rebuilt the trie per character
        match_list = []
        # In-progress matches: (current trie node, partial-result dict).
        active = []

        for pos, char in enumerate(self.html):
            if char in trie:
                # A new match may start at this character.
                active.append((trie, {'start': pos, 'match': ''}))

            # Advance every live state; collect survivors instead of
            # popping while iterating (the original mutated the list it
            # was enumerating, which can skip states on overlapping hits).
            survivors = []
            for node, partial in active:
                if char not in node:
                    continue  # dead state - drop it
                node = node[char]
                partial['match'] += char
                if node['is_end']:
                    match_list.append(copy.deepcopy(partial))
                    if len(node) == 1:  # leaf node: only 'is_end' remains
                        continue  # no longer keyword can extend this match
                survivors.append((node, partial))
            active = survivors

        return match_list

    # Extract the context (paragraphs) around the matched words.
    def filter_context(self):
        """Collect the paragraphs that contain any matched keyword.

        Side effects: ``self.unrepeated_word`` becomes the de-duplicated
        list of matched word rows, and ``self.context`` a JSON array of
        the matching paragraphs.

        :return: ``None`` when nothing matched, ``True`` otherwise.
        """
        match_list = self.match_sens()
        if not match_list:
            return None

        # De-duplicate the matched strings, preserving first-seen order.
        seen = []
        for item in match_list:
            if item['match'] not in seen:
                seen.append(item['match'])

        # Map back to the original word rows (keeps word_list order).
        unrepeated = [obj for obj in self.word_list if obj.words in seen]

        # Split the text into paragraphs and keep each (once) if it holds
        # a matched word.  Iterating over split('\n') also covers a final
        # paragraph with no trailing newline, which the original missed.
        context = []
        for paragraph in self.html.split('\n'):
            if not paragraph or paragraph in context:
                continue
            if any(obj.words in paragraph for obj in unrepeated):
                context.append(paragraph)

        self.unrepeated_word = unrepeated
        self.context = json.dumps(context)
        return True

    @property
    def get_title(self):
        """Page <title> text (kept as a property for caller compatibility)."""
        return self.title


# test
if __name__ == '__main__':
    # Manual smoke test: fetch a page, scan it against the DB word list,
    # and print the matched words plus their surrounding paragraphs.
    response = requests.get('http://www.kfrsks.com/show.asp?id=1017', headers=settings.Headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    words = DBSession.query(SensitiveWord).order_by('words').all()
    page = WebContent(soup, words)
    page.filter_context()
    print(page.unrepeated_word)
    print(page.context)
