import scrapy
import re
import math


class DoctorSpider(scrapy.Spider):
    """Crawl haodf.com hospital listings.

    Drill-down chain: province list -> districts -> hospitals -> departments
    -> paginated doctor list. Context (province/district/hospital/department
    names) is threaded through each request via ``meta``. Results are printed
    rather than yielded as items (debug-style output, preserved as-is).
    """

    name = "doctor"
    start_urls = ["https://www.haodf.com/hospital/list-11.html"]
    # Running counter: incremented once per province in parse(), then once
    # per doctor row in doctor_list(). Used only for the printed output.
    counter = 0

    @staticmethod
    def _strip_html_suffix(url):
        """Remove a trailing ``.html`` from *url*, if present.

        BUGFIX: the original used ``url.rstrip(".html")``, which strips any
        of the characters '.', 'h', 't', 'm', 'l' from the right end — it can
        eat legitimate trailing path characters (e.g. ``.../xml.html`` would
        become ``.../x``). This helper removes only the exact suffix.
        """
        return url[: -len(".html")] if url.endswith(".html") else url

    @staticmethod
    def _extract_count(text):
        """Return the first run of digits in *text* as a string, or "0".

        Replaces the fragile ``text[:-3]`` slicing, which assumed a fixed
        3-character suffix (e.g. "12位医生") and raised ``TypeError`` when
        the count node was missing (``.get()`` returned ``None``).
        """
        match = re.search(r"\d+", text or "")
        return match.group(0) if match else "0"

    def parse(self, response):
        """Parse the start page: extract each province name and its URL."""
        province_list = response.xpath("//div[@id='el_tree_1000000']/div[contains(@class, 'kstl')]/a")
        for province in province_list:
            self.counter += 1
            href = province.xpath("./@href").get()
            if not href:
                continue  # defensive: skip anchors without an href
            province_url = "https:" + href  # hrefs are protocol-relative
            province_name = province.xpath("./text()").get()

            yield scrapy.Request(url=province_url,
                                 callback=self.district,  # parser for this request
                                 meta={"province_name": province_name})  # pass context downstream

    def district(self, response):
        """Parse a province page: extract each district name and its URL."""
        province_name = response.meta["province_name"]
        district_list = response.xpath("//div[@id='el_tree_1000000']/div[@class='ksbd']/ul/li")
        for district in district_list:
            district_name = district.xpath("./a/@title").get()
            href = district.xpath("./a/@href").get()
            if not href:
                continue
            district_url = "https:" + href

            yield scrapy.Request(url=district_url,
                                 callback=self.hospital,
                                 meta={
                                     "district_name": district_name,
                                     "province_name": province_name,
                                 })

    def hospital(self, response):
        """Parse a district page: extract every hospital link in it."""
        province_name = response.meta["province_name"]
        district_name = response.meta["district_name"]
        hospital_list = response.xpath("//div[@class='m_ctt_green']/ul/li")
        for hospital in hospital_list:
            href = hospital.xpath("./a/@href").get()
            if not href:
                continue
            hospital_url = self._strip_html_suffix(href)
            # Rebuild the URL so it jumps straight to the department listing.
            keshi_url = hospital_url + "/keshi/list.html"
            hospital_name = hospital.xpath("./a/text()").get()

            yield scrapy.Request(url=keshi_url,
                                 callback=self.derpartment,
                                 meta={
                                     "province_name": province_name,
                                     "district_name": district_name,
                                     "hospital_name": hospital_name,
                                 }
                                 )

    # NOTE(review): method name has a typo ("derpartment" for "department");
    # kept as-is to avoid changing the spider's callback interface.
    def derpartment(self, response):
        """Parse a hospital's department list: extract each department link."""
        province_name = response.meta["province_name"]
        district_name = response.meta["district_name"]
        hospital_name = response.meta["hospital_name"]
        department_list = response.xpath("//div[@class='hos-keshi']/div[@class='item-wrap']//li")
        for department in department_list:
            href = department.xpath(".//a/@href").get()
            if not href:
                continue
            department_url = self._strip_html_suffix(href)
            # Raw doctor-count text, e.g. "12位医生"; digits extracted below.
            doctor_count = department.xpath(".//div[@class='count']/text()").get()
            # Rebuild the URL so it jumps straight to the department's doctor page.
            doctor_url = department_url + "/tuijian.html?type=keshi"

            department_name = department.xpath(".//div[@class='name-txt']/text()").get()
            yield scrapy.Request(url=doctor_url,
                                 callback=self.doctor_list,
                                 meta={
                                     "province_name": province_name,
                                     "district_name": district_name,
                                     "hospital_name": hospital_name,
                                     "department_name": department_name,
                                     "department_url": department_url,
                                     # Normalized digit string ("0" if missing),
                                     # so doctor_list can int() it safely.
                                     "doctor_count": self._extract_count(doctor_count),
                                     "page": 1,
                                 })

    def doctor_list(self, response):
        """Parse one page of a department's doctor list and follow pagination."""
        province_name = response.meta["province_name"]
        district_name = response.meta["district_name"]
        hospital_name = response.meta["hospital_name"]
        department_name = response.meta["department_name"]
        department_url = response.meta["department_url"]
        page = response.meta["page"]
        doctor_count = response.meta["doctor_count"]

        # Empty department: nothing to parse, stop the chain.
        if int(doctor_count) == 0:
            return
        doctors = response.xpath("//ul[@class='doc-list']/li[@class='item']/a[@class='item-bd']")  # adjust to page layout
        if not doctors:
            return  # no rows on this page; stop paginating

        for doctor in doctors:
            self.counter += 1
            doctor_name = doctor.xpath(".//p/span[@class='name']/text()").get()
            # string() flattens nested nodes; collapse all whitespace afterwards.
            wenzhen = doctor.xpath("string(.//span[@class='service-item'][1])").get()
            wenzhen = re.sub(r"\s", "", wenzhen)
            guahao = doctor.xpath("string(.//span[@class='service-item'][2])").get()
            guahao = re.sub(r"\s", "", guahao)
            recommend = doctor.xpath("./div[@class='tuijian-redu']//span[@class='score']/text()").get() or "暂无"
            doctor_url = doctor.xpath("./@href").get()
            print(self.counter, province_name, district_name, hospital_name, department_name, doctor_name, wenzhen,
                  guahao, recommend,
                  doctor_url)

        next_page = page + 1
        # 20 doctors per page; stop once the computed last page is exceeded.
        if next_page > math.ceil(int(doctor_count) / 20):
            return
        next_url = f"{department_url}/tuijian.html?type=keshi&p={next_page}"
        yield scrapy.Request(url=next_url,
                             callback=self.doctor_list,
                             meta={
                                 "province_name": province_name,
                                 "district_name": district_name,
                                 "hospital_name": hospital_name,
                                 "department_name": department_name,
                                 "department_url": department_url,
                                 "page": next_page,
                                 "doctor_count": doctor_count,
                             })
