"""
从原始页面提取xpath列表，获取p对象，通过p对象的不同属性
分级，定义type类型如下

1： 一级标题
2： 二级标题
3： 三级标题
4:  普通文本

<dl class="links"> dt  -> 0
text-indent:2em;font-size:18px;font-weight:bold; ->  1
text-indent:2em;font-size:16px;font-weight:bold; ->  2
text-indent:2em;font-size:15px;font-weight:bold; ->  3
text-indent:2em;                                 ->  4


然后在定义type类型的基础上，给p对象增加父节点属性，表示从属关系。获得当前节点的父节点id的方法：往前找到第一个type值比其小的节点，该节点的id即为其父节点的id。

"""
import json
import re

from lxml import etree


class Disease:
    """
    Path suffixes for each section page of a disease on the source site.
    Joined onto ``<data_dir>/<disease_name>`` to locate a saved HTML file.
    """
    HOME = '/home.html'  # home / overview tab
    INTRO = '/jbzs.html'  # introduction page
    SYMPTOM = '/zztz.html'  # symptoms page; symptoms and causes are combined — any "诊断" content found here belongs under "检查与诊断"
    REASON = '/blby.html'  # causes / pathology
    EXAM = '/jcjb.html'  # examination page; exams, diagnosis and differentiation share this page
    IDENTIFICATION = '/jb.html'  # differentiation
    TREATMENT = '/yyzl.html'  # treatment
    PREVENTION = '/yfhl.html'  # prevention & nursing, plus diet care
    NURSING = '/hl.html'  # nursing
    DIET = '/ysbj.html'  # diet
    COMPLICATION = '/bfbz.html'  # complications


def _read_content(path):
    """
    读取html页面的内容
    :param path:
    :return: xpath对象
    """
    try:
        with open(path, mode='r', encoding='gb18030') as f:
            content = f.read()
            return etree.HTML(content)
    except FileNotFoundError:
        return None


# Maps an element's @style (or @class, for <dl class="links"> headers) string
# to its hierarchy level: 0 = section header, 1-3 = heading levels, 4 = plain
# paragraph text.  The many near-duplicate keys are style strings observed in
# the scraped pages; an unseen style string will raise KeyError downstream.
type_table = {
    'links': 0,
    'text-indent:2em;font-size:18px;font-weight:bold;': 1,
    'font-size:18px;font-weight:bold;': 1,
    'text-indent:2em;font-size:16px;font-weight:bold;': 2,
    'white-space: normal;': 2,
    'font-size:16px;font-weight:bold;': 2,
    'text-indent:2em;font-size:15px;font-weight:bold;': 3,
    'font-size:15px;font-weight:bold;': 3,
    'text-indent:2em;text-indent: 2em; font-size: 15px; font-weight: bold;': 3,
    'text-indent:2em;': 4,
    'text-indent:2em;text-align: left;': 4,
    'text-indent:2em;text-align:center;': 4,
    'text-indent:2em;text-indent: 2em;': 4,
    'text-indent:2em;white-space: normal;': 4,
    'text-indent:2em;;;;;text-indent:0': 4,
    'text-indent:2em;;': 4,
    'text-indent:2em;;text-indent: 32px': 4,
    'text-indent:2em;;text-indent: 33px': 4,
    'text-indent:2em;;text-indent: 40px': 4,
    'text-indent:2em;font-family: 宋体; white-space: normal;': 4
}


def remove_title(text):
    """
    Strip a leading enumeration marker such as '一、', '1.', '（1）' or '1）'
    from the text, keeping everything after the marker character.

    :param text: raw text, possibly starting with a numbering marker
    :return: text with the marker removed, or unchanged when none is found
    """
    text = text.strip().replace('&ldquo;', '').replace('&rdquo;', '')
    # Separator characters accepted at each probe position: index 1 follows a
    # one-char label ('一、', '1.'); indices 2 and 3 follow two-/three-char
    # labels ('（1）', '(12)').  Only the first matching position is used,
    # mirroring the original if/elif chain.
    probes = (
        (1, ('.', '、', ')', '）')),
        (2, (')', '）', '.', '，')),
        (3, (')', '）', '.', '，')),
    )
    try:
        for pos, separators in probes:
            if text[pos] in separators:
                text = text.split(text[pos], maxsplit=1)[1]
                break
    except IndexError:
        # Text shorter than the probed position: no marker, keep as-is.
        pass

    return text


def add_id_and_type(p_list, type_table):
    """
    Annotate every element in p_list with 'id', 'content' and 'type' entries.

    :param p_list: list of lxml elements (<p> tags or <dl class="links"> blocks)
    :param type_table: mapping from style/class string to hierarchy level
    :return: list of dicts with keys 'id', 'content', 'type'
    :raises KeyError: when an element carries a style/class string missing from
        type_table (kept as a hard failure so unseen styles get noticed)
    """
    annotated = []
    for index, element in enumerate(p_list):
        item = {'id': index, 'content': remove_title(element.xpath('string(.)'))}

        # The level comes from the @style attribute when present, otherwise
        # from @class (the <dl class="links"> section headers); elements with
        # neither attribute are treated as plain paragraph text.
        p_type = 'text-indent:2em;'
        try:
            p_type = element.xpath('@style')[0]
        except IndexError:  # no @style attribute
            try:
                p_type = element.xpath('@class')[0]
                # For <dl class="links"> the heading text lives in the <dt> child.
                item['content'] = remove_title(element.xpath('dt')[0].xpath('string(.)'))
            except IndexError:
                pass  # neither attribute nor <dt>: keep the defaults
        item['type'] = type_table[p_type]

        annotated.append(item)

    return annotated


def add_parent_id(p_list):
    """
    Give every node a 'parent_id': the id of the nearest preceding node whose
    'type' is strictly smaller (i.e. a higher-level heading).  Nodes with no
    such ancestor get parent_id -1.  The dicts are mutated in place and
    returned in a new list.
    """
    linked = []
    for idx, node in enumerate(p_list):
        # Scan backwards (including the node itself, which can never match
        # since type < type is false) for the first smaller-typed node.
        ancestor = next(
            (cand['id'] for cand in reversed(p_list[:idx + 1]) if cand['type'] < node['type']),
            None,
        )
        if ancestor is not None:
            node['parent_id'] = ancestor
        elif 'parent_id' not in node:
            node['parent_id'] = -1
        linked.append(node)
    return linked


def get_json_tree(data, parent_id):
    """
    Build the JSON tree for all children of a given parent node.

    Plain-text nodes (type 4) become ``{'text': ...}`` leaves; every other
    node becomes ``{'title': ..., 'content': [...]}`` with its own children
    resolved recursively.

    :param data: flat list of node dicts carrying 'id', 'type', 'content',
        'parent_id'
    :param parent_id: id of the parent whose children are being collected
    :return: list of child nodes in input order
    """
    children = []
    for node in data:
        if node['parent_id'] != parent_id:
            continue
        if node['type'] == 4:
            children.append({'text': node['content']})
        else:
            children.append({
                'title': node['content'],
                'content': get_json_tree(data, node['id']),
            })
    return children


def remove_blank_p(p_list):
    """Return only the elements whose text content is not empty/whitespace."""
    return [p for p in p_list if p.xpath('string(.)').strip()]


def extract_from_origin_page(tree):
    """
    Extract the content of a regular disease page and build its JSON tree.

    :param tree: parsed page tree (from _read_content)
    :return: nested list-of-dicts tree rooted at the virtual parent id -1
    """
    elements = tree.xpath('//div[@class="art-box"]/*')
    # Drop empty paragraphs, classify each element, then wire up parents.
    cleaned = remove_blank_p(elements)
    typed = add_id_and_type(cleaned, type_table)
    linked = add_parent_id(typed)
    return get_json_tree(linked, -1)


def extract_from_symptom_page(tree, disease_name):
    """
    Extract the symptom page.  When every element is plain text (type 4)
    there is no structure to build: the disease name is appended to
    ``log2.txt`` and None is returned instead of a JSON tree.
    """
    elements = tree.xpath('//div[@class="art-box"]/*')

    # Drop empty paragraphs and classify the rest.
    typed = add_id_and_type(remove_blank_p(elements), type_table)

    # At least one non-plain element means the page has headings and a tree
    # can be constructed.
    if any(node['type'] != 4 for node in typed):
        linked = add_parent_id(typed)
        return get_json_tree(linked, -1)

    with open('log2.txt', mode='a', encoding='utf-8') as f:
        f.write(disease_name + '\n')
    return None


# 把鉴别，预防，并发症页面归为一类，因为其dt在art-box外面

def extract_from_special_page(tree):
    """
    Extract pages (differentiation / prevention / complications) whose
    <dl class="links"> section headers sit outside the art-box divs.

    Each header is interleaved with the children of the art-box at the same
    index; pages without such headers fall back to the regular layout.
    NOTE(review): assumes art_boxes has at least as many entries as dls when
    both are present — confirm against the scraped pages.
    """
    elements = []
    dls = tree.xpath('//div[@class="chi-know chi-int"]/dl[@class="links"]')
    art_boxes = tree.xpath('//div[@class="art-box"]')
    if dls:
        for idx, dl in enumerate(dls):
            elements.append(dl)
            if art_boxes:
                elements.extend(art_boxes[idx].xpath('*'))
    else:
        elements = tree.xpath('//div[@class="art-box"]/*')

    typed = add_id_and_type(remove_blank_p(elements), type_table)
    linked = add_parent_id(typed)
    return get_json_tree(linked, -1)


def myappend(key, value, l):
    """
    Append a {'title': key, 'text': value} entry to list ``l``, skipping
    empty values so blank fields never appear in the output.
    """
    if value:  # truthiness instead of len(): same result for the str values used here
        l.append({'title': key, 'text': value})


def extract_from_intro_page(intro_tree, home_tree):
    """
    Collect the summary fields (aliases, intro text, typical symptoms,
    department, infectiousness, etc.) from a disease's intro and home pages.

    :param intro_tree: parsed intro page (jbzs.html); may be None
    :param home_tree: parsed home page (home.html); may be None
    :return: list of {'title': ..., 'text': ...} dicts; empty fields skipped

    Each lookup catches (AttributeError, IndexError): IndexError when the
    xpath matches nothing, AttributeError when the tree itself is None
    (``_read_content`` returns None for missing files) — replacing the
    original bare excepts without losing that tolerance.
    """
    intro_list = []

    # Aliases: the page uses either a fullwidth or an ASCII comma as the
    # separator; keep whichever split produced more parts.
    try:
        disease_alias0 = intro_tree.xpath('//i[contains(text(),"别名")]/../text()')[0].split('，')
        disease_alias1 = intro_tree.xpath('//i[contains(text(),"别名")]/../text()')[0].split(',')
        if len(disease_alias0) > len(disease_alias1):
            disease_alias1 = disease_alias0
    except (AttributeError, IndexError):
        disease_alias1 = []
    # Some pages expose synonyms in a dedicated tab instead of the alias row.
    try:
        disease_alias2 = home_tree.xpath('//div[@class="tyc"]/text()')[0].split('、')
    except (AttributeError, IndexError):
        disease_alias2 = []
    # De-duplicate and trim whitespace; note set() makes the order arbitrary.
    disease_alias = '，'.join(list(map(lambda x: x.strip(), list(set(disease_alias1 + disease_alias2)))))

    # Introduction paragraph.
    try:
        intro = intro_tree.xpath('//dl[@class="intro"]/dd/text()')[0].strip()
    except (AttributeError, IndexError):
        intro = ''

    # Typical symptoms.
    symptom = '，'.join(list(intro_tree.xpath('//i[contains(text(),"相关症状")]/../a[@class="blue"]/text()')))

    # The English name, when present, sits in parentheses inside the intro.
    try:
        eng_name = re.findall(r'[(]([A-Za-z\s，,’\d]*?)[)]', intro)[0]
    except IndexError:
        eng_name = ''
    myappend('疾病英文名', eng_name, intro_list)
    myappend('别名', disease_alias, intro_list)
    myappend('简介', intro, intro_list)

    myappend('典型症状', symptom, intro_list)

    # Complications.
    complication = '，'.join(list(intro_tree.xpath('//i[contains(text(),"并发")]/../a/@title')))
    myappend('并发症', complication, intro_list)

    # Affected body parts.
    disease_location = '，'.join(intro_tree.xpath('//i[contains(text(),"发病部位")]/../a/text()'))
    myappend('发病部位', disease_location, intro_list)
    # Department.
    department = '，'.join(intro_tree.xpath('//i[contains(text(),"就诊科室")]/../a/text()'))
    myappend('就诊科室', department, intro_list)

    # Infectiousness.
    try:
        infectious = intro_tree.xpath('//i[contains(text(),"传染性")]/../text()')[0]
    except (AttributeError, IndexError):
        infectious = ''
    myappend('传染性', infectious, intro_list)

    # Susceptible population.
    try:
        population = intro_tree.xpath('//i[contains(text(),"多发人群")]/../text()')[0]
    except (AttributeError, IndexError):
        population = ''
    myappend('多发人群', population, intro_list)

    # Treatment cost: the first text node is sometimes blank, so fall back
    # to the second one.
    try:
        price = intro_tree.xpath('//i[contains(text(),"治疗费用")]/../text()')[0].strip() or \
                intro_tree.xpath('//i[contains(text(),"治疗费用")]/../text()')[1].strip()
    except (AttributeError, IndexError):
        price = ''
    myappend('治疗费用', price, intro_list)

    # Common drugs.
    drug = '，'.join(list(intro_tree.xpath('//i[contains(text(),"常用")]/../a/@title')))
    myappend('常用药品', drug, intro_list)

    return intro_list


if __name__ == '__main__':
    disease_data_path = 'D:/PycharmProjects/39html_data/disease/'
    disease_name = 'fgz'
    symptom_path = disease_data_path + disease_name + Disease.REASON
    symptom_tree = _read_content(symptom_path)
    p_list = extract_from_origin_page(symptom_tree)
    print(p_list)
    # Close the output file deterministically: the original passed a bare
    # open() to json.dump and leaked the handle.
    with open('result.json', mode='w', encoding='utf-8') as f:
        json.dump(p_list, f, ensure_ascii=False)
