import urllib.request
from pymongo import MongoClient
from lxml import etree
import time
import random


class Medical_Spider():
    """Crawler for disease pages on jib.xywy.com and examination pages on
    jck.xywy.com, storing results in a local MongoDB instance.

    NOTE(review): comments translated from the original Chinese. Page-layout
    assumptions (CSS class names, GBK encoding) come from the original
    author's observations of the site — confirm before relying on them.
    """

    def __init__(self):
        # Connect to local MongoDB (pymongo connects lazily, so this does
        # not touch the network yet).
        self.client = MongoClient("localhost", 27017)
        self.db = self.client['medical']
        self.collection = self.db['data']

    @staticmethod
    def _squash(text):
        """Return *text* with all spaces, newlines and tabs removed.

        One C-level pass via str.translate instead of three chained
        .replace() calls.
        """
        return text.translate(str.maketrans('', '', ' \n\t'))

    def get_requests(self, url):
        """Fetch *url* and return the body decoded as GBK text.

        The site is crawler-friendly: a browser User-Agent header is all it
        checks, and sub-pages are enumerable by plain integer ids.
        """
        headers = {
            'User-Agent':
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.188"
        }
        request = urllib.request.Request(url=url, headers=headers)
        # Context manager closes the response; the original leaked the socket.
        with urllib.request.urlopen(request) as response:
            return response.read().decode("gbk")

    def gaishu_spider(self, url):
        """Scrape an overview ("gaishu") page.

        Returns a dict with the disease name, category breadcrumb, short
        description paragraphs and key attribute lines.
        Raises IndexError when the page has no title element (callers in
        main_spider catch and log all exceptions).
        """
        xpath_selector = etree.HTML(self.get_requests(url))
        title = xpath_selector.xpath('//div[@class="jb-name fYaHei gre"]/text()')
        categories = xpath_selector.xpath('//div[@class="wrap mt10 nav-bar"]/a/text()')
        description = xpath_selector.xpath(
            '//div[@class="jib-articl-con jib-lh-articl"]/p/text()')
        attributes = []
        for node in xpath_selector.xpath('//div[@class="mt20 articl-know"]/p'):
            # Matches the original behaviour: only runs of THREE spaces are
            # removed here (layout indentation); single spaces are kept.
            text = node.xpath('string(.)').replace('   ', '').replace('\n', '').replace('\t', '')
            if text:  # drop empty paragraphs
                attributes.append(text)
        return {
            "name": title[0],
            "category": categories,
            "desc": description,
            "attributes": attributes
        }

    def _paragraph_text(self, url):
        """Fetch *url* and return all <p> text merged into one string.

        //p//text() (not /text()) is used so text nested inside <strong>
        etc. is also captured; whitespace-only fragments are dropped.
        A disease needs only one cause/prevention attribute, so multiple
        paragraphs are joined.
        """
        xpath_selector = etree.HTML(self.get_requests(url))
        fragments = xpath_selector.xpath('//p//text()')
        return ''.join(t for t in (f.strip() for f in fragments) if t)

    def cause_spider(self, url):
        """Return the disease-cause text of a cause/ page as one string."""
        return self._paragraph_text(url)

    def prevent_spider(self, url):
        """Return the prevention text of a prevent/ page as one string."""
        return self._paragraph_text(url)

    def symptom_spider(self, url):
        """Return the list of symptom names linked from a symptom/ page."""
        xpath_selector = etree.HTML(self.get_requests(url))
        return xpath_selector.xpath('//a[@class="gre"]/text()')

    def inspect_spider(self, url):
        """Return the hrefs of the check items on an inspect/ page.

        The tab page itself lacks full data, so only the links are stored;
        inspect_name_spider builds a separate collection resolving them.
        """
        xpath_selector = etree.HTML(self.get_requests(url))
        return xpath_selector.xpath('//li[@class="check-item"]/a/@href')

    def treat_spider(self, url):
        """Return treatment key/value lines from a treat/ page.

        Key and value sit in separate <span>s inside each <p>; //text()
        would split them into separate list items, so string() merges each
        paragraph first, then whitespace is squashed.
        """
        xpath_selector = etree.HTML(self.get_requests(url))
        info = []
        for node in xpath_selector.xpath('//div[starts-with(@class,"mt20 articl-know")]/p'):
            text = self._squash(node.xpath('string()').strip())
            if text:
                info.append(text)
        return info

    def food_spider(self, url):
        """Return {'good', 'bad', 'recommand'} food lists from a food/ page,
        or {} when the page has fewer than three diet sections."""
        xpath_selector = etree.HTML(self.get_requests(url))
        sections = xpath_selector.xpath('//div[@class="diet-img clearfix mt20"]')
        try:
            return {
                'good': sections[0].xpath('./div/p/text()'),
                'bad': sections[1].xpath('./div/p/text()'),
                'recommand': sections[2].xpath('./div/p/text()')
            }
        except IndexError:  # page lacks one of the three sections
            return {}

    def drug_spider(self, url):
        """Return recommended drug names (whitespace removed) from a drug/ page."""
        xpath_selector = etree.HTML(self.get_requests(url))
        return [self._squash(name) for name in
                xpath_selector.xpath('//div[@class="fl drug-pic-rec mr30"]/p/a/text()')]

    def main_spider(self, start=1, stop=12000):
        """Crawl disease ids start..stop-1 and insert one document per id.

        Defaults reproduce the original hard-coded range; pass a different
        *start* to resume after an interruption (the progress print below
        tells you where to resume from). Per-page errors are logged and
        skipped so one broken page cannot kill the whole run.
        """
        count = 1
        base = 'http://jib.xywy.com/il_sii/{}/{}.htm'
        for p in range(start, stop):
            try:
                data = {
                    'basic_info': self.gaishu_spider(base.format('gaishu', p)),
                    'cause_info': self.cause_spider(base.format('cause', p)),
                    'prevent_info': self.prevent_spider(base.format('prevent', p)),
                    'symptom_info': self.symptom_spider(base.format('symptom', p)),
                    # https kept for inspect only, matching the original URLs.
                    'inspect_info': self.inspect_spider(
                        'https://jib.xywy.com/il_sii/inspect/{}.htm'.format(p)),
                    'treat_info': self.treat_spider(base.format('treat', p)),
                    'food_info': self.food_spider(base.format('food', p)),
                    'drug_info': self.drug_spider(base.format('drug', p))
                }
                self.collection.insert_one(data)
                # Tiny randomized delay — at least the interval isn't fixed
                # (a longer one would make the crawl impractically slow).
                time.sleep(random.uniform(0.0001, 0.001))
                # Progress marker; on an unexpected crash the last printed
                # value locates the first un-crawled page id.
                print('bruh{}'.format(count))
                count += 1
            except Exception as e:
                print(e)

    def inspect_name_spider(self, start=1, stop=4000):
        """Crawl examination pages jck.xywy.com/jc_<p>.html and store
        {'url', 'name'} documents in the 'jc' collection.

        Only the detail page carries the full examination name; the listing
        pages truncate it with an ellipsis. Defaults reproduce the original
        range; per-page errors are logged and skipped.
        """
        count = 1
        for p in range(start, stop):
            try:
                url = 'http://jck.xywy.com/jc_{}.html'.format(p)
                xpath_selector = etree.HTML(self.get_requests(url))
                fragments = xpath_selector.xpath('//div[@class="clearfix"]//text()')
                # First non-empty fragment is the examination name; raises
                # StopIteration (caught below) when the page has none.
                name = next(t for t in (f.strip() for f in fragments) if t)
                self.db['jc'].insert_one({
                    'url': url,
                    'name': name
                })
                time.sleep(random.uniform(0.0001, 0.001))
                print('bruh1{}'.format(count))
                count += 1
            except Exception as e:
                print(e)


if __name__ == "__main__":
    # Guard so importing this module does not immediately start a crawl.
    item1 = Medical_Spider()
    # item1.main_spider()  # uncomment to crawl the disease pages
    item1.inspect_name_spider()
    # Supplementary crawl of examination names; running only a short test
    # range would leave part of the inspection data missing.
