# 爬虫文件描述
# /*********************************************************
# Copyright @ 苏州瑞泰信息技术有限公司 All rights reserved.
# 创建人   : Luck Chen
# 创建时间 : 2023/08/23 00:00:00
# 说明     : 民福康-分布式爬虫
# *********************************************************/

import time
import scrapy
from scrapy_redis.spiders import RedisSpider
from scrapy.http import Request
from ..items import hosdata, docdata
import unicodedata
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.common.by import By
import re
import csv


class MfkRedisSpiderSpider(scrapy.Spider):
    """Crawl doctor listings from mip.mfk.com.

    Cross-joins every (province, city) row of ``mfk_place.csv`` with every
    department row of ``der_place.csv``, requests the paginated doctor-list
    AJAX endpoint for each combination (pages 1-15), then follows each
    doctor link and scrapes the detail page into a ``docdata`` item.

    NOTE(review): despite the name, this subclasses ``scrapy.Spider`` and
    not ``scrapy_redis.spiders.RedisSpider`` (imported but unused) --
    confirm whether distributed crawling via Redis is still intended.
    """

    name = 'mfk_redis_spider'
    # start_urls = ['https://mip.mfk.com/doctor/?area_id=0']
    start_urls = ['https://mip.mfk.com/']

    # Number of result pages requested per (department, city) combination.
    MAX_PAGES = 15

    @staticmethod
    def _read_columns(path, columns=(1, 2, 3)):
        """Read *path* once and return one list per index in *columns*.

        The original code opened each CSV file once per column (three
        times); a single pass over the rows is equivalent and avoids the
        redundant I/O.
        """
        with open(path, "r", encoding="utf-8") as file:
            rows = list(csv.reader(file))
        return [[row[i] for row in rows] for i in columns]

    @staticmethod
    def _first_text(response, xpath, default=''):
        """``string(.)`` of the first node matching *xpath*, or *default*.

        Returning *default* when no node matches replaces the previous
        unguarded ``.extract()[0]`` that raised IndexError on pages where
        the fallback layout is also missing.
        """
        nodes = response.xpath(xpath)
        if nodes:
            return nodes.xpath("string(.)").extract()[0]
        return default

    def _pick_text(self, response, primary, fallback):
        """Text of *primary* if that node exists, else text of *fallback*."""
        text = self._first_text(response, primary, default=None)
        if text is not None:
            return text
        return self._first_text(response, fallback)

    @staticmethod
    def _clean(text):
        """Strip CR/LF, ideographic spaces (U+3000) and ASCII spaces."""
        return (text.replace("\n", "").replace("\r", "")
                    .replace("\u3000", "").replace(" ", ""))

    def parse(self, response):
        """Generate one doctor-list request per (place, department, page).

        Both CSV files are loaded exactly once up front; previously
        ``der_place.csv`` was re-read (three times) on every iteration of
        the outer province loop even though its content never changes.
        """
        provinces, city_codes, cities = self._read_columns(
            "./C0153256/mfk_place.csv")
        departments, department_codes, departments1 = self._read_columns(
            "./C0153256/der_place.csv")
        for province, city_code, city in zip(provinces, city_codes, cities):
            for department, department_code, department1 in zip(
                    departments, department_codes, departments1):
                for page in range(1, self.MAX_PAGES + 1):
                    url = 'https://mip.mfk.com/index.php?m=mfk&c=doctor&a=get_doctor_list_ajax&department_id={}&t=&part_id=0&illness_id=0&area_id={}&q=&rank_type=0&t=&page={}'.format(department_code, city_code, page)
                    yield Request(url=url,
                                  meta={"province": province, "city": city,
                                        "department": department,
                                        "department1": department1},
                                  callback=self.doc_parse)

    def doc_parse(self, response):
        """Follow every doctor-profile link found on a list page."""
        keys = ("province", "city", "department", "department1")
        base_meta = {key: response.meta[key] for key in keys}
        for doc_url in response.xpath("/html/body/div/div[1]/a/@href").extract():
            yield Request(url='https://mip.mfk.com' + doc_url,
                          # fresh dict per request so requests never share meta
                          meta=dict(base_meta),
                          callback=self.doc_detail_parse)

    def doc_detail_parse(self, response):
        """Scrape one doctor detail page into a ``docdata`` item.

        Two page layouts exist: the "mip" layout (positional xpaths) and
        the "registered" layout (class-based xpaths). For each field the
        mip layout is tried first and the registered layout is the
        fallback; when neither matches, the field is '' instead of
        raising IndexError as the original fallback branches did.
        """
        item = docdata()
        item['province'] = response.meta["province"]
        item['city'] = response.meta["city"]
        item['department'] = response.meta["department"]
        item['department1'] = response.meta["department1"]

        item['name'] = self._pick_text(
            response,
            "//div[2]/div/div/h2/a",
            "//section/div[@class='registered-header']/dl[@class='flex']/dd/p[@class='p1']/a")
        item['jobtitle'] = self._pick_text(
            response,
            "/html/body/div[2]/div/div/div[1]/span[2]",
            "//section/div[@class='registered-header']/dl[@class='flex']/dd/div[@class='p2 flex box-pack-between']/p[@class='p2_p']/a[1]")
        item['hospital'] = self._pick_text(
            response,
            "//div[2]/div/div/div[2]/span[1]",
            "//section/div[@class='registered-header']/dl[@class='flex']/dd/p[@class='p3 flex box-align-center']/span/a")
        # grade only exists in the registered layout; '' otherwise.
        item['grade'] = self._first_text(
            response,
            "//section/div[@class='registered-header']/dl[@class='flex']/dd/div[@class='p4 flex box-pack-between']/div[@class='p4_inlineFirst']/span[1]")
        item['type'] = self._pick_text(
            response,
            "//div[2]/div/div/div[2]/span[2]",
            "//section/div[@class='registered-header']/dl[@class='flex']/dd/div[@class='p4 flex box-pack-between']/div[@class='p4_inlineFirst']/span[2]")

        mip_goodat = response.xpath("//div[3]/div[1]/mip-showmore")
        if mip_goodat:
            item['goodat'] = self._clean(
                mip_goodat.xpath("string(.)").extract()[0])
        elif response.xpath(
                "//section/div[@class='registered-content']/div[@class='registered-GoodDisease']/ul"):
            # Registered layout stores goodat as a LIST of li texts (the
            # original did the same, via a positional xpath) -- so the
            # field's type intentionally differs between layouts.
            item['goodat'] = response.xpath(
                "//section/div[2]/div[1]/ul/li").xpath(
                "string(.)").extract()
        # else: field left unset, matching the original behavior.

        mip_remarks = response.xpath("//div[3]/div[2]/mip-showmore/span[2]")
        if mip_remarks:
            item['remarks'] = self._clean(
                mip_remarks.xpath("string(.)").extract()[0])
        else:
            item['remarks'] = self._clean(self._first_text(
                response,
                "//section/div[@class='registered-content']/div[@class='registered-doctorIntroduction']/div[@class='registeredContent']/p/span[@class='span1']"))

        # multi_job is a list in both layouts (no [0] in the original).
        multi_job = response.xpath(
            "/html/body/section/div[@class='registered-content']/div[@class='InquiringServiceNewDiv']/div[@id='hospital_div']/div/div[@class='reservationServiceList']/dl[@class='flex']/dd/div[@class='reservationServiceLeftDiv flex-wrap box-tb box-pack-between']/a[1]/p[@class='reservationServiceLeftP1 flex']/b")
        if multi_job:
            item['multi_job'] = multi_job.xpath("string(.)").extract()
        else:
            item['multi_job'] = response.xpath(
                "/html/body/div[4]/div/h2/div").xpath(
                "string(.)").extract()

        item['url'] = response.url
        yield item