import os
import random
import string
import time
import requests
from lxml import etree
from enum import Enum, unique

"""
http://jib.xywy.com/il_sii_10026.htm   首页
http://jib.xywy.com/il_sii/gaishu/10026.htm  简介
http://jib.xywy.com/il_sii/cause/10026.htm   病因
http://jib.xywy.com/il_sii/prevent/10026.htm 预防
http://jib.xywy.com/il_sii/neopathy/10026.htm 并发症
http://jib.xywy.com/il_sii/symptom/10026.htm  症状
http://jib.xywy.com/il_sii/inspect/10026.htm  检查
http://jib.xywy.com/il_sii/diagnosis/10026.htm 鉴别诊断
http://jib.xywy.com/il_sii/treat/10026.htm 治疗
http://jib.xywy.com/il_sii/nursing/10026.htm 护理
http://jib.xywy.com/il_sii/food/10026.htm  饮食
http://jib.xywy.com/il_sii/drug/10026.htm  药品
"""

# Browser-like request headers so the site serves normal pages to the scraper.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
    ),
}


@unique
class Category(Enum):
    """URL path segment for each disease-detail section on jib.xywy.com.

    A detail URL is built as ``<prefix> + member.value + <page>``, e.g.
    ``http://jib.xywy.com/il_sii`` + ``/cause/`` + ``10026.htm``.
    Definition order is the order in which pages are fetched and saved.
    """

    gaishu = '/gaishu/'        # overview
    cause = '/cause/'          # etiology
    prevent = '/prevent/'      # prevention
    neopathy = '/neopathy/'    # complications
    symptom = '/symptom/'      # symptoms
    inspect = '/inspect/'      # examinations / tests
    diagnosis = '/diagnosis/'  # differential diagnosis
    treat = '/treat/'          # treatment
    nursing = '/nursing/'      # nursing care
    food = '/food/'            # diet
    drug = '/drug/'            # medication


class UrlSpider(object):
    """Crawler for disease pages on jib.xywy.com.

    Two phases:
      1. ``get_url`` walks the a-z index pages and appends every disease
         home-page URL to ``urls.txt``.
      2. ``download_all_page`` reads ``urls.txt`` and saves the home page
         plus every :class:`Category` detail page into one folder per
         disease under ``save_path``.
    """

    def __init__(self, start_url, save_path):
        # start_url: index-page prefix, e.g. 'http://jib.xywy.com/html/'
        # save_path: local directory that receives one sub-folder per disease
        self.start_url = start_url
        self.save_path = save_path

    def get_url(self):
        """Crawl index pages a.html .. z.html and append found URLs to urls.txt."""
        for suffix in string.ascii_lowercase:
            print(f'正在爬取第{suffix}个页面')
            index_url = self.start_url + suffix + '.html'
            print(index_url)
            content = self.get_html(index_url)
            time.sleep(random.random())  # polite crawling: ~0.5 s average delay
            urls = self.parse_page(content)
            if urls:
                with open('urls.txt', mode='a', encoding='utf-8') as f:
                    # hrefs are site-relative, e.g. '/il_sii_10026.htm'
                    for href in urls:  # renamed: no longer shadows index_url
                        f.write('http://jib.xywy.com' + href + '\n')
            else:
                print(f'第{suffix}个页面不存在')

    def get_html(self, url):
        """Fetch *url* and return its decoded text, or the sentinel '异常' on failure.

        The sentinel is kept (rather than raising) because downstream code
        treats it as ordinary page text: it yields no xpath matches and the
        crawl continues with the next page.
        """
        try:
            r = requests.get(url, timeout=30, headers=headers)
            r.raise_for_status()
            # The site serves GBK-family content; gb18030 is its full superset.
            r.encoding = 'gb18030'
            return r.text
        except requests.RequestException:
            # was a bare `except:` — never swallow SystemExit/KeyboardInterrupt
            return '异常'

    def parse_page(self, content):
        """Extract the relative disease-page hrefs from an index page."""
        tree = etree.HTML(content)
        urls = tree.xpath('//div[@class="fl jblist-con-ear"]/div/ul/li/a/@href')
        return urls

    def download_all_page(self):
        """Download home + all Category pages for every URL in urls.txt."""
        with open('urls.txt', mode='r', encoding='utf-8') as f:
            url_list = f.read().splitlines()
        for index, home_url in enumerate(url_list):
            print(f'正在下载第{index + 1}个疾病：{home_url}')
            # One folder per disease, named after the page file name
            # (e.g. 'il_sii_10026.htm').
            name = home_url.split('/')[-1]
            path = os.path.join(self.save_path, name)
            os.makedirs(path, exist_ok=True)  # race-free vs. exists()+makedirs()
            url_dict = self.construct_url_list(home_url)

            # The home page has no category path segment, so save it separately.
            text = self.get_html(home_url)
            self.save_to_file(text, path)
            time.sleep(random.random())
            for category, url in url_dict.items():
                text = self.get_html(url)
                self.save_to_file(text, path, category)
                time.sleep(random.random())  # throttle detail pages as well
            # NOTE(review): removed a stray `break` here that stopped the loop
            # after the first disease — it contradicted the function's purpose
            # and looked like a debugging leftover.

    def construct_url_list(self, home_url):
        """Build {category_name: detail_url} from a disease home-page URL.

        'http://jib.xywy.com/il_sii_10026.htm' ->
        {'gaishu': 'http://jib.xywy.com/il_sii/gaishu/10026.htm', ...}
        """
        # Split on the last '_' instead of the old hard-coded slice offsets
        # [:26]/[27:], which only worked for one exact URL length.
        prefix, _, page = home_url.rpartition('_')
        return {name: prefix + member.value + page
                for name, member in Category.__members__.items()}

    def save_to_file(self, text, path, category='home'):
        """Write one page's text to <path>/<category>.html in the site encoding."""
        file_path = os.path.join(path, category + '.html')
        with open(file_path, mode='w', encoding='gb18030') as f:
            f.write(text)


def main():
    """Entry point: download every disease's pages listed in urls.txt."""
    spider = UrlSpider(
        start_url='http://jib.xywy.com/html/',
        save_path='D:/PycharmProjects/xywy_data',
    )
    spider.download_all_page()
    # On a first run, collect the URL list instead:
    # spider.get_url()


if __name__ == '__main__':
    main()
