# 爬虫文件描述
# /*********************************************************
# Copyright @ 苏州瑞泰信息技术有限公司 All rights reserved.
# 创建人   : Luck Chen
# 创建时间 : 2023/08/23 00:00:00
# 说明     : 中华康网-分布式爬虫
# *********************************************************/
import csv
import time
import scrapy
from scrapy.http import Request
from ..items import hosdata, docdata
import unicodedata
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.common.by import By
import re

class ZhkwRedisSpiderSpider(scrapy.Spider):
    """Distributed spider for cnkang.com (中华康网).

    Pipeline:
      parse            -> re-issue the start URL carrying the province->URL map
      lookforhos       -> read crawl targets from a local CSV, schedule one
                          hospital-list request per row
      hos_parse        -> locate the target hospital on the province list page
      hos_detail_parse -> follow every department of the matched hospital
      dep_detail_parse -> follow every doctor link in a department
      doc_parse        -> build and yield one ``docdata`` item per doctor
    """

    name = 'zhkw_redis_spider'
    # start_urls = ['http://www.cnkang.com/yyk/hospdept/5641/']
    start_urls = ['http://www.cnkang.com/yyk/hospital/1/0/']

    # CSV of crawl targets. Raw string so the Windows backslashes are not
    # interpreted as (deprecated) escape sequences.
    TARGET_CSV = r"D:\PythonProject\Data\GetDoctor.csv"

    # Province name -> cnkang.com hospital-list URL.
    PROVINCE_URLS = {
        "北京": "http://www.cnkang.com/yyk/hospital/1/0/",
        "上海": "http://www.cnkang.com/yyk/hospital/2/0/",
        "广东": "http://www.cnkang.com/yyk/hospital/3/0/",
        "山东": "http://www.cnkang.com/yyk/hospital/10/0/",
        "河南": "http://www.cnkang.com/yyk/hospital/13/0/",
        "江苏": "http://www.cnkang.com/yyk/hospital/5/0/",
        "浙江": "http://www.cnkang.com/yyk/hospital/6/0/",
        "安徽": "http://www.cnkang.com/yyk/hospital/7/0/",
        "湖北": "http://www.cnkang.com/yyk/hospital/18/0/",
        "湖南": "http://www.cnkang.com/yyk/hospital/19/0/",
        "江西": "http://www.cnkang.com/yyk/hospital/8/0/",
        "福建": "http://www.cnkang.com/yyk/hospital/9/0/",
        "海南": "http://www.cnkang.com/yyk/hospital/27/0/",
        "四川": "http://www.cnkang.com/yyk/hospital/20/0/",
        "贵州": "http://www.cnkang.com/yyk/hospital/28/0/",
        "云南": "http://www.cnkang.com/yyk/hospital/24/0/",
        "陕西": "http://www.cnkang.com/yyk/hospital/22/0/",
        "甘肃": "http://www.cnkang.com/yyk/hospital/23/0/",
        "青海": "http://www.cnkang.com/yyk/hospital/29/0/",
        "宁夏": "http://www.cnkang.com/yyk/hospital/30/0/",
        "新疆": "http://www.cnkang.com/yyk/hospital/25/0/",
        "台湾": "http://www.cnkang.com/yyk/hospital/455/0/",
        "香港": "http://www.cnkang.com/yyk/hospital/453/0/",
        "澳门": "http://www.cnkang.com/yyk/hospital/454/0/",
        "西藏": "http://www.cnkang.com/yyk/hospital/31/0/",
        "天津": "http://www.cnkang.com/yyk/hospital/14/0/",
        "重庆": "http://www.cnkang.com/yyk/hospital/21/0/",
        "吉林": "http://www.cnkang.com/yyk/hospital/17/0/",
        "辽宁": "http://www.cnkang.com/yyk/hospital/15/0/",
        "内蒙古": "http://www.cnkang.com/yyk/hospital/26/0/",
        "河北": "http://www.cnkang.com/yyk/hospital/12/0/",
        "山西": "http://www.cnkang.com/yyk/hospital/11/0/",
        "广西": "http://www.cnkang.com/yyk/hospital/4/0/",
        "黑龙江": "http://www.cnkang.com/yyk/hospital/16/0/",
    }

    def parse(self, response):
        """Bootstrap: hand the province lookup table to ``lookforhos``.

        ``dont_filter=True`` is required here: the start request already
        visited this exact URL, so without it the duplicate filter would
        drop this request and ``lookforhos`` would never run.
        """
        yield Request(url=response.url,
                      meta={"province_dict": self.PROVINCE_URLS},
                      callback=self.lookforhos,
                      dont_filter=True)

    def city_parse(self, response):
        """List the cities of a province page (legacy, currently unreferenced).

        Kept for interface compatibility with the disabled crawl path.
        """
        pname = response.meta["pname"]
        city_hrefs = response.xpath("//div[@class='yslist_dq yslist_dq03']/dl[2]/dd/a/@href").extract()
        city_names = response.xpath("//div[@class='yslist_dq yslist_dq03']/dl[2]/dd/a/text()").extract()
        for href, cname in zip(city_hrefs, city_names):
            # NOTE(review): the original only printed these entries and still
            # followed them; behavior preserved.
            if cname == '全部' or cname == '其他':
                print(cname)
            yield Request(url='http://www.cnkang.com' + href,
                          meta={"pname": pname, "cname": cname},
                          callback=self.hos_parse)

    def lookforhos(self, response):
        """Read the target CSV and schedule one hospital-list request per row.

        Expected columns: row[2] = province (with 省/市/自治区 suffixes),
        row[5] = hospital name, row[-1] = standardized hospital name.
        """
        province_dict = response.meta["province_dict"]
        with open(self.TARGET_CSV, "r", encoding="utf-8", newline="") as file:
            reader = csv.reader(file)
            # Skip the header row; an empty file would otherwise raise
            # StopIteration inside this generator (PEP 479 -> RuntimeError).
            if next(reader, None) is None:
                return
            for row in reader:
                # Normalize the province to the bare name used as dict key.
                target_province = row[2].replace('省', '').replace('市', '').replace('自治区', '').replace('回族', '').replace('壮族', '')
                target_name = row[5]
                target_std_name = row[-1]
                self.logger.debug("Row: %s", row)
                self.logger.info("Target Province: %s, Target Name: %s, Target Std Name: %s", target_province,
                                 target_name, target_std_name)
                url = province_dict.get(target_province)
                if url is None:
                    # Unknown province: skip this row instead of raising
                    # KeyError and aborting the whole callback.
                    self.logger.warning("No list URL for province %r; skipping row %s",
                                        target_province, row)
                    continue
                # Fresh meta dict per request (do not mutate response.meta).
                meta = dict(response.meta,
                            pname=target_province,
                            target_name=target_name,
                            target_std_name=target_std_name)
                yield Request(url=url, meta=meta, callback=self.hos_parse)

    def hos_parse(self, response):
        """Find the target hospital on the province list page and follow it.

        Matching prefers the raw CSV name, then the standardized name.
        Unmatched targets are silently skipped (as before).
        """
        target_name = response.meta.get('target_name')
        target_std_name = response.meta.get('target_std_name')

        hos_links = response.xpath("//div[@class='yslist06']/div/ul/li/a/@href").extract()
        hos_names = [response.xpath(f"//div[@class='yslist06']/div/ul/li[{i + 1}]/a/text()").extract_first()
                     for i in range(len(hos_links))]

        for candidate in (target_name, target_std_name):
            if candidate in hos_names:
                href = hos_links[hos_names.index(candidate)]
                # 'hospindex' -> 'hospdept' switches to the department listing.
                detail_url = f"http://www.cnkang.com{href.replace('hospindex', 'hospdept')}"
                yield Request(url=detail_url, meta=response.meta,
                              callback=self.hos_detail_parse)
                return

    def hos_detail_parse(self, response):
        """Follow every department page of the matched hospital."""
        dep_links = response.xpath(
            "//div[@class='w880 left']/div[@class='yslist06 yslist06b']/div/ul/li/a/@href").extract()
        dep_names = response.xpath(
            "//div[@class='w880 left']/div[@class='yslist06 yslist06b']/div/ul/li/a/text()").extract()
        for dep_name, dep_href in zip(dep_names, dep_links):
            # Fresh meta dict per request (do not mutate response.meta).
            meta = dict(response.meta, department=dep_name)
            yield Request(url='http://www.cnkang.com' + dep_href,
                          meta=meta, callback=self.dep_detail_parse)

    def dep_detail_parse(self, response):
        """Follow every doctor profile linked from a department page."""
        doc_links = response.xpath(
            "//div[3]/div[1]/div[3]/span[1]/div/div/a/@href").extract()
        for doc_href in doc_links:
            yield Request(url='http://www.cnkang.com' + doc_href,
                          meta=response.meta, callback=self.doc_parse)

    @staticmethod
    def _clean(text):
        """Strip newlines, CRs, ideographic spaces (U+3000) and ASCII spaces."""
        return (text.replace("\n", "")
                    .replace("\r", "")
                    .replace("\u3000", "")
                    .replace(" ", ""))

    def doc_parse(self, response):
        """Extract one doctor profile into a ``docdata`` item.

        Each field probes the newer page layout first and falls back to the
        legacy layout, defaulting to '' when neither matches.
        """
        item = docdata()
        item['province'] = response.meta["pname"]
        item['city'] = ''
        item['hospital'] = ''
        item['department'] = response.meta["department"]
        item["std_hos"] = response.meta["target_std_name"]

        # Doctor name.
        name_sel = response.xpath(
            "//div[@class='doctor-detail_profile clearfix']/div[@class='profile_more']/div[@class='profile-name']/span")
        if name_sel.extract():
            item['name'] = name_sel.xpath("string(.)").extract()[0]
        else:
            item['name'] = response.xpath(
                "/html/body/div[2]/div/dl/dd/ul/span/a").xpath(
                "string(.)").extract()[0]

        # Job / teaching titles.
        title_sel = response.xpath("/html[1]/body[1]/div[2]/div[1]/dl[1]/dd[1]/ul[1]")
        if title_sel.extract():
            jobtitle = self._clean(title_sel.xpath("string(.)").extract()[0])
            # NOTE(review): all spaces were just stripped, so this split is a
            # no-op and jobtitle == teachtitle for every record; kept as-is to
            # preserve the original output — confirm intended delimiter.
            parts = re.split(r'[ ]', jobtitle)
            item['jobtitle'] = parts[0]
            item['teachtitle'] = parts[-1]
        else:
            item['jobtitle'] = response.xpath("//div[2]/div/dl/dt/text()").extract()[0]
            item['teachtitle'] = ''

        # Speciality ("good at") text.
        goodat_new = response.xpath("/html/body/div[3]/div[1]/ul[4]")
        goodat_old = response.xpath("//dl[@class='ys17b ys17b_1']/div[@id='cc']")
        if goodat_new.extract():
            item['goodat'] = self._clean(goodat_new.xpath("string(.)").extract()[0])
        elif goodat_old.extract():
            item['goodat'] = self._clean(goodat_old.xpath("string(.)").extract()[0])
        else:
            item['goodat'] = ''

        # Biography / remarks.
        remarks_new = response.xpath("/html[1]/body[1]/div[3]/div[1]/ul[4]/p[1]")
        remarks_old = response.xpath("//dl[@id='ysme']/div[@id='jj']")
        if remarks_new.extract():
            item['remarks'] = self._clean(remarks_new.xpath("string(.)").extract()[0])
        elif remarks_old.extract():
            item['remarks'] = self._clean(remarks_old.xpath("string(.)").extract()[0])
        else:
            item['remarks'] = ''

        item['url'] = response.url
        yield item
