import redis
import scrapy
import re
import math
from scrapy_redis.spiders import RedisSpider


class AnswerSpider(RedisSpider):
    """Distributed spider that walks haodf.com hospital pages, fans out to
    per-department doctor listings, and records each doctor's total
    consultation count into Redis.

    Pipeline: parse (hospital page) -> parse_doctor (department listing,
    JSON) -> parse_doctor_answer_total (doctor detail page).
    """

    name = "answer"
    # scrapy-redis list this spider pops task URLs from.
    redis_key = "task_queue"
    # The department listing endpoint pages doctors 20 at a time.
    DOCTORS_PER_PAGE = 20
    # Consultation records are paged 30 at a time on the doctor page.
    ANSWERS_PER_PAGE = 30
    # NOTE(review): connection is created at import time with a hard-coded
    # host; consider moving host/db into Scrapy settings. db=3 stores results.
    redis_conn = redis.Redis(host='192.168.18.11', db=3)

    def parse(self, response):
        """Parse a hospital page: enumerate its departments and schedule one
        POST request per page of each department's doctor list.

        Reads ``province`` and ``hospital_name`` from ``response.meta``
        (with defaults) and forwards them to :meth:`parse_doctor`.
        """
        province = response.meta.get("province", "上海")
        hospital_name = response.meta.get("hospital_name", "上海交通大学医学院附属仁济医院（东院）")
        self.logger.info("任务信息：%s %s", province, hospital_name)
        department_list = response.xpath(
            "//div[contains(@id,'anchor')]/ul[@class='item-ul']/li[@class='item-li']/a[@class='faculty-item']")
        for department in department_list:
            department_name = department.xpath(".//div[@class='name-txt']/text()").get()
            department_url = department.xpath("./@href").get()
            doctor_count_text = department.xpath(".//div[@class='count']/text()").get()
            # Guard: a malformed entry must not kill the whole callback.
            if not department_url or not doctor_count_text:
                self.logger.warning("跳过无法解析的科室: %s", department_name)
                continue
            department_doctor_count = int(doctor_count_text.replace("位医生", ""))

            # The href embeds two numeric ids (hospital id, faculty id).
            # Raw string avoids the invalid-escape warning; take the first
            # two matches instead of unpacking, which raised ValueError on
            # any unexpected URL shape.
            ids = re.findall(r"\d+", department_url)
            if len(ids) < 2:
                self.logger.warning("链接中未找到医院/科室ID: %s", department_url)
                continue
            hospital_id, hospital_faculty_id = ids[0], ids[1]

            url = ("https://www.haodf.com/nhospital/pc/keshi/ajax"
                   "HosTuijianDocList")
            headers = {
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            }
            pages = math.ceil(department_doctor_count / self.DOCTORS_PER_PAGE)
            for page in range(1, pages + 1):
                body = f"hospitalId={hospital_id}&order=2&hospitalFacultyId={hospital_faculty_id}&docGradeList=&serviceArr=&nowPage={page}&needTopTitle=0"
                yield scrapy.Request(url=url, method="POST", headers=headers, body=body, callback=self.parse_doctor,
                                     meta={
                                         "province": province,
                                         "hospital_name": hospital_name,
                                         "department_name": department_name
                                     })

    def parse_doctor(self, response):
        """Parse one JSON page of a department's doctor list and schedule a
        detail-page request per doctor.
        """
        hospital_name = response.meta["hospital_name"]
        # NOTE(review): meta also carries "province" and "department_name",
        # but they were never forwarded nor used downstream — confirm whether
        # the result string should include them.
        for item in response.json()["data"]:
            doctor_id = item["doctorId"]
            doctor_name = item["baseDoctorInfo"]["name"]
            # Fixed suffix typo: ".htmL" -> ".html" (URL paths are
            # case-sensitive).
            url = f"https://www.haodf.com/doctor/{doctor_id}/bingcheng.html?p_type=all"
            yield scrapy.Request(url=url, callback=self.parse_doctor_answer_total, meta={
                "doctor_name": doctor_name,
                "hospital_name": hospital_name
            })

    def parse_doctor_answer_total(self, response):
        """Extract a doctor's total consultation count and push a summary
        record ("hospital--doctor--count--pages") onto the Redis "result"
        list. Doctors with no visible count are silently skipped.
        """
        hospital_name = response.meta["hospital_name"]
        doctor_name = response.meta["doctor_name"]
        answer_count = response.xpath("//span[@class='f14 orange1']/text()").get()
        if answer_count:
            answer_count = int(answer_count.replace("人次", ""))
            request_total = math.ceil(answer_count / self.ANSWERS_PER_PAGE)
            self.logger.info("当前%s医生问诊量：%s", doctor_name, answer_count)
            self.logger.info("采集全部问诊信息需要发送请求总数：%s", request_total)
            self.redis_conn.rpush("result",
                                  f"{hospital_name}--{doctor_name}--{answer_count}--{request_total}")
