# 爬虫文件描述
# /*********************************************************
# Copyright @ 苏州瑞泰信息技术有限公司 All rights reserved.
# 创建人   : Sovaint Xia
# 创建时间 : 2021/03/18 00:00:00
# 说明     : 微医-分布式爬虫
# *********************************************************/
import csv

from scrapy.http import Request
import unicodedata
import requests
import json
from ..items import hosdata, docdata
import scrapy


# class WyRedisSpiderSpider(RedisSpider):
class WyRedisSpiderSpider(scrapy.Spider):
    """Crawler for wedoctor.com (微医).

    Pipeline: read hospital names from a local CSV seed file, search the
    site for each name, then follow hospital -> department (shift-case
    listing) -> doctor profile pages, yielding ``hosdata`` / ``docdata``
    items.

    NOTE(review): ``province_parse`` and ``hos_detail_parse`` are not
    scheduled by any active callback in this file (the requests that
    targeted them were commented out). They are kept for the Redis-driven
    variant hinted at by the commented-out ``RedisSpider`` base class.
    """

    name = 'wy_redis_spider'
    # redis_key = 'wyHos:start_urls'
    start_urls = ['https://www.wedoctor.com/search/hospital?']

    @staticmethod
    def _clean_text(text, nfkc=False):
        """Collapse CR/LF to spaces, optionally NFKC-normalizing first.

        Returns '' when *text* is None so that a missing page node no
        longer raises AttributeError on the ``.replace()`` chain (the
        original called ``.replace``/``normalize`` directly on
        ``extract_first()``, which returns None for absent nodes).
        """
        if text is None:
            return ''
        if nfkc:
            text = unicodedata.normalize('NFKC', text)
        return text.replace(u'\n', u' ').replace(u'\r', u' ')

    def parse(self, response):
        """Queue one hospital-search request per row of the seed CSV.

        Assumes column 5 of the CSV holds the hospital name to search for
        and the last column its standardized name — TODO confirm against
        the GetDoctor.csv layout.
        """
        # Raw string: the original "D:\PythonProject\..." literal relied on
        # \P, \D and \G being passed through as-is; invalid escape
        # sequences are deprecated and rejected by newer Python versions.
        with open(r"D:\PythonProject\Data\GetDoctor.csv", 'r', encoding='utf-8') as file:
            reader = csv.reader(file)
            next(reader)  # skip the header row
            for row in reader:
                target_name = row[5]
                target_std_name = row[-1]
                # Explicit per-request meta. (Scrapy's Request shallow-copies
                # the meta dict, so the original in-place mutation of
                # response.meta happened to work, but building a fresh dict
                # is clearer and safer.)
                meta = dict(response.meta)
                meta['target_name'] = target_name
                meta['target_std_name'] = target_std_name
                yield Request(
                    url="https://www.wedoctor.com/search/hospital?q={}&searchType=search".format(target_name),
                    meta=meta, callback=self.lookforhos)

    def lookforhos(self, response):
        """Follow every hospital link on a search-results page."""
        # The original wrapped this in a bare ``except: pass``; dropped
        # because xpath/extract do not raise for missing nodes (they yield
        # an empty list) and the bare except silently hid real errors.
        hos_list = response.xpath(
            "/html/body/div[1]/div[2]/div/div[1]/div[3]/div/ul/li/a/@href").extract()
        for hos_url in hos_list:
            yield Request(url=hos_url, meta=response.meta, callback=self.hos_parse)

    def province_parse(self, response):
        """Paginate one province/city hospital listing (10 results/page).

        Expects ``page_index``, ``pname`` (province) and ``cname`` (city)
        in response.meta; currently unreachable (its seeding requests were
        commented out of ``parse``).
        """
        page_index = response.meta['page_index']
        pname = response.meta["pname"]
        cname = response.meta["cname"]
        hos_list = response.xpath(
            "//div[@class='g-hospital-items to-margin']/ul[@class='hos_ul']"
            "/li[@class='g-hospital-item J_hospitalItem']"
            "/a[@class='cover-bg seo-anchor-text']/@href").extract()
        for hos_url in hos_list:
            yield Request(url=hos_url, meta={'pname': pname, 'cname': cname},
                          callback=self.hos_parse)
        # Total-results counter drives pagination. The original called
        # response.xpath(" ") here — an invalid XPath that raises
        # ValueError — while the int() conversion below used the
        # J_ResultNum selector; the guard clearly meant the same selector.
        total_text = response.xpath(
            "//div[@class='filter-tip']/span[@class='result-num']/strong[@id='J_ResultNum']"
        ).xpath("string(.)").extract_first()
        if total_text:
            total_page = (int(total_text) // 10) + 1  # 10 hospitals per page
        else:
            total_page = 0
        if page_index + 1 <= total_page:
            yield Request(
                url=response.url.split('region_sort')[0] + 'region_sort/p{0}'.format(page_index + 1),
                meta={'page_index': page_index + 1, 'pname': pname, 'cname': cname},
                callback=self.province_parse)

    def hos_parse(self, response):
        """Follow each department link of a hospital page, rewritten to
        the shift-case (schedule) URL that carries the doctor listing."""
        dep_list = response.xpath(
            "/html/body/div[1]/div[2]/div/div[2]/div/div/section[2]/div[2]/ul/li/p/span/a/@href").extract()
        for dep_url in dep_list:
            # Fresh meta per request instead of mutating response.meta
            # inside the loop; start each department at page 1.
            meta = dict(response.meta)
            meta['page_index'] = 1
            yield Request(url=dep_url.replace("/department", "/department/shiftcase"),
                          meta=meta, callback=self.dep_parse)
        # yield Request(url=response.url.replace("/hospital", "/hospital/introduction"), meta={'pname':pname,'cname':cname}, callback=self.hos_detail_parse)

    def hos_detail_parse(self, response):
        """Yield a ``hosdata`` item scraped from a hospital introduction
        page; expects ``pname``/``cname`` in response.meta (only supplied
        by the province_parse path, which is currently disabled)."""
        item = hosdata()
        item['province'] = response.meta["pname"]
        item['city'] = response.meta["cname"]
        item['name'] = response.xpath(
            "//div[@class='info']/div[@class='detail word-break']/h1/strong/a").xpath(
            "normalize-space(.)").extract_first()
        item['address'] = self._clean_text(response.xpath(
            "//div[@class='detail word-break']/div[@class='address']/span").xpath(
            "normalize-space(.)").extract_first())
        item['telephone'] = self._clean_text(response.xpath(
            "//div[@class='detail word-break']/div[@class='tel']/span").xpath(
            "normalize-space(.)").extract_first())
        item['hos_level'] = response.xpath(
            "//div[@class='info']/div[@class='detail word-break']/h1/span").xpath(
            "normalize-space(.)").extract_first()
        item['remarks'] = self._clean_text(response.xpath(
            "//div[@class='content-wrap']/div[@class='introduction']/div[@class='introduction-content']/pre").xpath(
            "string(.)").extract_first(), nfkc=True)
        item['order_num'] = response.xpath(
            "//div[@class='status']/div[@class='C0153256']/div[@class='total fix-clear']/p/strong[1]").xpath(
            "normalize-space(.)").extract_first()
        item['url'] = response.url
        yield item

    def dep_parse(self, response):
        """Paginate a department's shift-case listing (12 doctors/page)
        and follow each doctor profile link."""
        if response.xpath("//div[@class='pagers']").xpath("a").extract():
            total_page = (int(response.xpath(
                "/html/body/div[1]/div[2]/div/div[1]/span/strong").xpath(
                "normalize-space(.)").extract_first()) // 12) + 1
        else:
            total_page = 0
        page_index = response.meta['page_index']
        doc_list = response.xpath(
            "//div[@class='g-doctor-item2 g-clear to-margin']/div[@class='g-doc-baseinfo g-left']"
            "/dl/dt/a[@class='name js-doc']/@href").extract()
        for doc_url in doc_list:
            yield Request(url=doc_url, callback=self.doc_parse)
        if page_index + 1 <= total_page:
            # Fresh meta per request instead of mutating response.meta.
            meta = dict(response.meta)
            meta['page_index'] = page_index + 1
            yield Request(url=response.url.split('&pageNo=')[0] + '&pageNo={0}'.format(page_index + 1),
                          meta=meta, callback=self.dep_parse)

    def doc_parse(self, response):
        """Yield a ``docdata`` item scraped from a doctor profile page."""
        item = docdata()
        item['name'] = response.xpath(
            "//div[@class='info']//div[@class='detail word-break']/h1/strong[@class='J_ExpertName']").xpath(
            "string(.)").extract_first()
        item['hospital'] = response.xpath(
            "//div[@class='detail word-break']/div[@id='card-hospital']/p/b[1]").xpath(
            "normalize-space(.)").extract_first()
        item['depart'] = response.xpath(
            "//div[@class='detail word-break']/div[@id='card-hospital']/p/b[2]").xpath(
            "normalize-space(.)").extract_first()
        item['disease_list'] = response.xpath(
            "//div[@class='info']/div[@class='detail word-break']/div[@class='keys']/b").xpath(
            "normalize-space(.)").extract()
        # 'goodat' / 'about' usually live in a hidden <input value="...">;
        # fall back to the raw <span> markup when the input is absent.
        # _clean_text guards the None case that used to crash here, and
        # applies the CR/LF stripping uniformly to both fields.
        goodat = response.xpath("//div[@class='goodat']/input/@value").extract_first()
        if goodat is None:
            goodat = response.xpath("//div[@class='goodat']/span").extract_first()
        item['goodat'] = self._clean_text(goodat, nfkc=True)
        about = response.xpath("//div[@class='about']/input/@value").extract_first()
        if about is None:
            about = response.xpath("//div[@class='about']/span").extract_first()
        item['remarks'] = self._clean_text(about, nfkc=True)
        item['url'] = response.url
        yield item
