"""
Description: 提取症状页面的数据，并做结构化处理，存储到为json格式，并保存到mongodb中
"""
import json

from lxml import etree

from utils import get_disease_url, split_lines, extractor
from symptom_page.log import my_log

# Module-level logger; channel name 'symptom' matches this package.
logger = my_log('symptom')

# NOTE: the bare string below is an outline of the target JSON structure
# (top-level section headings produced by the __main__ driver):
#   disease overview / clinical presentation & causes / examination /
#   diagnosis / treatment / nursing / complications
"""
疾病概述
临床表现与病因 （症状 疾病分类 病因 病理生理）
检查
诊断
治疗
护理
并发症
"""


class Disease:
    """
    Filename suffixes of the per-disease HTML pages saved to disk.

    Each constant is appended to ``<data_path> + <disease_name>`` to build
    the path of one section page of a disease.
    """
    HOME = '/home.html'
    INTRO = '/jbzs.html'
    SYMPTOM = '/zztz.html'  # symptoms and causes combined; if a "诊断" (diagnosis) section appears here it must be moved under "检查与诊断" (examination & diagnosis)
    REASON = '/blby.html'
    EXAM = '/jcjb.html'  # examination, diagnosis and differentiation together
    IDENTIFICATION = '/jb.html'
    TREATMENT = '/yyzl.html'  # treatment
    PREVENTION = '/yfhl.html'  # prevention/nursing and dietary care
    NURSING = '/hl.html'
    DIET = '/ysbj.html'
    COMPLICATION = '/bfbz.html'


def read_content(path):
    """
    Read a saved HTML page from disk and parse it with lxml.

    :param path: filesystem path of the HTML file (gb18030-encoded, as
                 scraped from 39.net)
    :return: an lxml element tree (supports ``.xpath``), or ``None`` when
             the file does not exist — callers must handle ``None``
    """
    try:
        with open(path, mode='r', encoding='gb18030') as f:
            return etree.HTML(f.read())
    except FileNotFoundError:
        # logger.exception already records the traceback, so the explicit
        # exc_info=True flag was redundant; use lazy %-style args rather
        # than eager string concatenation.
        logger.exception('%s下文件未找到', path)
        return None


def make_json_tree(disease_data_path, disease_index, disease_name):
    """
    Locate the saved HTML page of one disease section, parse it, and
    convert the xpath tree into a json tree.

    :param disease_data_path: root directory holding the scraped pages
    :param disease_index: page filename suffix — one of the ``Disease`` constants
    :param disease_name: (pinyin) directory name of the disease
    :return: list of section dicts (``{'title': ..., 'content': ...}``);
             an empty list when the page file is missing
    """
    path = disease_data_path + disease_name + disease_index
    xpath_tree = read_content(path)
    # read_content returns None for a missing file; contribute nothing
    # instead of crashing on None.xpath(...).
    if xpath_tree is None:
        return []

    # The diet page has a flat layout and needs special handling.
    if disease_index == Disease.DIET:
        diet_list = xpath_tree.xpath('//div[@class="yinshi_title"]/text()')
        # Each entry looks like "标题：正文"; split on the FIRST full-width
        # colon only, so body text containing further colons stays intact.
        diet_list2 = [
            {'title': item.split('：', 1)[0], 'text': item.split('：', 1)[1]}
            for item in diet_list
        ]
        return [{'title': '饮食', 'content': diet_list2}]
    # The intro page also needs data merged in from the home page.
    elif disease_index == Disease.INTRO:
        home_path = disease_data_path + disease_name + Disease.HOME
        home_tree = read_content(home_path)
        json_tree = extractor.extract_from_intro_page(xpath_tree, home_tree)
    else:
        json_tree = extractor.extract_from_origin_page(xpath_tree, disease_name)

    return json_tree


if __name__ == '__main__':
    disease_url_path = 'D:/PycharmProjects/39_health_disease/disease_fuse_no_dup.txt'
    disease_data_path = 'D:/PycharmProjects/39html_data/disease/'
    file_data_path = 'D:/PycharmProjects/39html_data/json_data/'
    for i, url in enumerate(get_disease_url(disease_url_path), start=1):
        print(i)
        disease_name = split_lines(url)
        try:
            # The Chinese disease name comes from the <h1> on the home page.
            home_path = disease_data_path + disease_name + Disease.HOME
            home_tree = read_content(home_path)
            name = home_tree.xpath('//h1/text()')[0]
            all_dict = {'title': name, 'content': []}

            # Section 1: disease overview.
            dict1 = {'title': '疾病概述', 'content': []}
            json_intro_tree: dict = make_json_tree(disease_data_path, Disease.INTRO, disease_name)
            dict1['content'].extend(json_intro_tree)

            # Section 2: clinical presentation and causes.
            dict2 = {'title': '临床表现与病因', 'content': []}
            json_symptom_tree = make_json_tree(disease_data_path, Disease.SYMPTOM, disease_name)
            json_reason_tree = make_json_tree(disease_data_path, Disease.REASON, disease_name)
            # If the symptom page contains a "诊断" (diagnosis) section, move
            # it out so it ends up under "检查与诊断" instead.
            diagnosis = {}
            for idx, item in enumerate(json_symptom_tree):
                if '诊断' in item['title']:
                    # BUG FIX: pop the MATCHED section (by index); the old
                    # code popped the last element regardless of the match.
                    diagnosis = json_symptom_tree.pop(idx)
                    break

            dict2['content'].extend(json_symptom_tree)
            dict2['content'].extend(json_reason_tree)

            # Section 3: examination and diagnosis.
            dict3 = {'title': '检查与诊断', 'content': []}
            json_exam_tree = make_json_tree(disease_data_path, Disease.EXAM, disease_name)
            json_identification_tree = make_json_tree(disease_data_path, Disease.IDENTIFICATION, disease_name)
            dict3['content'].extend(json_exam_tree)
            if diagnosis:
                dict3['content'].append(diagnosis)
            dict3['content'].extend(json_identification_tree)

            # Section 4: treatment (traditional-Chinese-medicine entries are dropped).
            dict4 = {'title': '治疗', 'content': []}
            json_treatment_tree = make_json_tree(disease_data_path, Disease.TREATMENT, disease_name)
            for item in json_treatment_tree:
                if '中医' in item['title']:
                    json_treatment_tree.remove(item)
                    break  # break keeps removal-during-iteration safe
            dict4['content'].extend(json_treatment_tree)

            # Section 5: prevention, nursing and diet.
            dict5 = {'title': '预防与护理', 'content': []}
            json_prevention_tree = make_json_tree(disease_data_path, Disease.PREVENTION, disease_name)
            dict5['content'].extend(json_prevention_tree)
            json_nursing_tree = make_json_tree(disease_data_path, Disease.NURSING, disease_name)
            dict5['content'].extend(json_nursing_tree)
            json_diet_tree = make_json_tree(disease_data_path, Disease.DIET, disease_name)
            dict5['content'].extend(json_diet_tree)

            # Section 6: complications.
            dict6 = {'title': '并发症', 'content': []}
            json_complication_tree = make_json_tree(disease_data_path, Disease.COMPLICATION, disease_name)
            dict6['content'].extend(json_complication_tree)

            all_dict['content'].extend([dict1, dict2, dict3, dict4, dict5, dict6])

            # Use a context manager so the output file is always closed
            # (the old code passed a bare open() to json.dump).
            with open(file_data_path + disease_name + '.json', mode='w', encoding='utf-8') as out:
                json.dump(all_dict, out, ensure_ascii=False)
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt); record the traceback and keep the
            # best-effort list of failed diseases in log.txt.
            logger.exception('failed to process disease %s', disease_name)
            with open('log.txt', mode='a', encoding='utf-8') as f:
                f.write(disease_name + '\n')
