import json
import re

from lxml import etree
from log import my_log

# Module-level logger; 'pingan' tags this scraper's entries in the log output.
logger = my_log('pingan')


class Extractor(object):
    """Parse crawled disease HTML pages and save them as hierarchical JSON.

    Each page is reduced to a flat list of records ``{'content': str,
    'type': int}`` where ``type`` encodes the heading level (1-4) or plain
    text (5); the flat list is then linked by parent ids and nested into a
    JSON tree written to ``save_path``.
    """

    def __init__(self, data_path, save_path, page_num, start_page=6792):
        """
        :param data_path: directory holding the raw '<i>.html' pages
        :param save_path: directory that receives the '<i>.json' output
        :param page_num: exclusive upper bound of the page index range
        :param start_page: inclusive lower bound of the range (previously a
            hard-coded constant inside parse_all_page; now configurable)
        """
        self.data_path = data_path
        self.save_path = save_path
        self.page_num = page_num
        self.start_page = start_page

    def parse_all_page(self):
        """Read and parse every page in [start_page, page_num)."""
        for i in range(self.start_page, self.page_num):
            # Fixed typo: encoding was 'utf=8'; it only worked by accident
            # because Python's codec lookup normalizes '=' to '_'.
            with open(self.data_path + str(i) + '.html', mode='r', encoding='utf-8') as f:
                text = f.read()
            print(i)
            tree = etree.HTML(text)
            self.parse_page(tree, i)

    def parse_page(self, tree, i):
        """
        Parse a single page.

        :param tree: lxml HTML tree of the page
        :param i: page index, used for logging and as the output file name
        :return: None; returns early (writing nothing) on unparseable pages
        """
        try:
            disease_name = tree.xpath('//p[@class="name-main"]/text()')[0]
        except IndexError:
            logger.info(f'第{i}个页面疾病名字解析错误')
            return None

        # Pages containing a table are only recorded, for manual handling
        # later.  Hoisted out of the per-item loop: the original evaluated
        # the absolute XPath '//div/table' on each item, which always
        # searches the whole document anyway, so one document-level check
        # is equivalent and cheaper.
        if tree.xpath('//div/table'):
            logger.info(f'第{i}个页面出现table')
            return None

        content_list: list = tree.xpath('//div[@class="main-content curTab"]/*')
        # Drop the first, useless div; guard against an empty selection
        # instead of raising IndexError.
        if content_list:
            content_list.pop(0)

        # Log the hierarchy problem at most once per page.
        hierarchy_logged = False
        result_list = []
        for item in content_list:
            if len(item.xpath('div[@class="para"]')) > 2 and not hierarchy_logged:
                hierarchy_logged = True
                logger.info(f'第{i}个页面中存在层级问题')

            # Level-1 heading
            if item.xpath('span/text()'):
                result_list.append({'content': item.xpath('span')[0].xpath('string(.)').strip(), 'type': 1})

            # Level-3 heading + text, or level-3 + level-4 headings + text
            elif list(filter(Extractor.not_empty, item.xpath('div[@class="para"]/strong/text()'))) and list(
                    filter(Extractor.not_empty, item.xpath('text()'))):

                # The first <p> under the item may hold leading text.
                p = list(filter(Extractor.not_empty, item.xpath('p/text()')))
                if p:
                    result_list.append({'content': p[0], 'type': 5})

                # Headings and the text nodes that follow them.
                headings = list(filter(Extractor.not_empty, item.xpath('div[@class="para"]/strong/text()')))
                texts = list(filter(Extractor.not_empty, item.xpath('text()')))
                if headings and texts:
                    self._pair_headings_with_texts(headings, texts, result_list)

            # Level-2 heading inside a para div
            elif item.xpath('div[@class="para"]/strong/text()'):
                result_list.append(
                    {'content': item.xpath('div[@class="para"]/strong')[0].xpath('string(.)').strip(), 'type': 2})

            # Level-2 heading directly under the item
            elif item.xpath('strong/text()'):
                result_list.append(
                    {'content': item.xpath('strong')[0].xpath('string(.)').strip(), 'type': 2})

            # Plain text
            elif item.xpath('p/text()'):
                result_list.append({'content': item.xpath('p')[0].xpath('string(.)').strip(), 'type': 5})

        self.remove_blank_content(result_list)
        self.add_id(result_list)
        linked = self.add_parent_id(result_list)
        json_list = self.get_json_tree(linked, -1)
        self.write_to_file(self.save_path, {'title': disease_name, 'content': json_list}, i)

    @staticmethod
    def _is_numbered_heading(text):
        """True for headings numbered like '（1）…' or exactly '1）'."""
        # Raw strings fix the invalid-escape warning the old '\d' patterns
        # produced on modern Python.
        return bool(re.findall(r'（(\d+)）', text) or re.findall(r'^(\d+)）$', text))

    @staticmethod
    def _pair_headings_with_texts(p1, p2, result_list):
        """Interleave heading entries (p1) with text entries (p2) into result_list.

        On the very first iteration a numbered heading such as '（1）' is
        treated as a level-3 heading (type 3); every other heading becomes
        level-4 (type 4), each followed by one text entry (type 5) when one
        is available.  Both input lists are consumed in place.
        """
        j = 0
        while p1 or p2:
            # p1 is guaranteed non-empty on the first pass (caller checks),
            # and the short-circuit keeps p1[0] safe on later passes.
            numbered_first = j == 0 and Extractor._is_numbered_heading(p1[0].strip())
            if len(p1) > len(p2):
                # More headings than texts: this heading has no body text.
                if numbered_first:
                    if p1:
                        result_list.append({'content': p1.pop(0).strip(), 'type': 3})
                else:
                    if p1:
                        result_list.append({'content': p1.pop(0).strip(), 'type': 4})
                    if p2:
                        result_list.append({'content': p2.pop(0).strip(), 'type': 5})
            else:
                if p1:
                    result_list.append({'content': p1.pop(0).strip(), 'type': 3 if numbered_first else 4})
                if p2:
                    result_list.append({'content': p2.pop(0).strip(), 'type': 5})
            j = j + 1

    def get_json_tree(self, data, parent_id):
        """Recursively nest the flat record list under the given parent id.

        :param data: records carrying 'id', 'parent_id', 'type', 'content'
        :param parent_id: id whose children are collected (-1 = tree root)
        :return: list of {'text': ...} leaves and {'title': ..., 'content': [...]} nodes
        """
        content = []
        for item in data:
            if item['parent_id'] != parent_id:
                continue
            if item['type'] == 5:
                content.append({'text': item['content']})
            else:
                content.append({'title': item['content'], 'content': self.get_json_tree(data, item['id'])})
        return content

    def add_id(self, rlist):
        """Assign each record its list index as 'id', in place."""
        for index, item in enumerate(rlist):
            item['id'] = index

    def add_parent_id(self, rlist):
        """Link every record to the nearest preceding record of a lower type.

        Records with no such ancestor get parent_id -1 (tree root).

        :return: the same records, each augmented with 'parent_id'
        """
        new_rlist = []
        for index, p in enumerate(rlist):
            # Walk backwards from p (the slice includes p itself, which can
            # never match because a type is not lower than itself).
            for p2 in rlist[index::-1]:
                if p2['type'] < p['type']:
                    p['parent_id'] = p2['id']
                    break
            if 'parent_id' not in p:
                p['parent_id'] = -1
            new_rlist.append(p)
        return new_rlist

    def write_to_file(self, path, data, id):
        """Dump one page's data as '<id>.json' under path.

        NOTE(review): the parameter name 'id' shadows the builtin; kept
        unchanged for call compatibility.
        """
        with open(path + str(id) + '.json', mode='w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False)

    def remove_blank_content(self, l):
        """Remove records whose 'content' is empty, in place."""
        l[:] = [item for item in l if item['content']]

    @staticmethod
    def not_empty(s):
        """Strip a possible BOM ('\\ufeff') and surrounding whitespace.

        Returns a falsy value for BOM-only or blank strings, so it doubles
        as a filter() predicate.
        """
        cleaned = s.encode('utf-8').decode('utf-8-sig')
        return cleaned and cleaned.strip()


def main():
    """Entry point: parse the crawled HTML pages and dump each one as JSON."""
    extractor = Extractor(
        data_path='D:/PycharmProjects/pingan_data/',
        save_path='D:/PycharmProjects/pingan_data/json_data/',
        page_num=6793,
    )
    extractor.parse_all_page()


if __name__ == '__main__':
    main()
