# -*- coding: utf-8 -*-
import logging
import sys
from urllib2 import Request

import scrapy
from scrapy.http import Request
import pymysql

from hospitalInfo.items import HospitalinfoItem

# Module-level logger. NOTE(review): logging.getLogger(__name__) is the
# usual convention — using __file__ makes the logger name a file path.
log = logging.getLogger(__file__)
# Python 2 hack: site.py deletes sys.setdefaultencoding at startup, so the
# module is reloaded to re-expose it. This makes implicit str<->unicode
# conversions default to UTF-8 instead of ASCII (avoids UnicodeDecodeError
# on the Chinese text scraped below). Not portable to Python 3.
reload(sys)
sys.setdefaultencoding('utf8')


class hospitalSpider(scrapy.Spider):
    """Crawl yyk.99.com.cn and yield one HospitalinfoItem per hospital.

    Flow: province/area index -> city page -> hospital list page ->
    hospital detail ("jianjie") page, where the item fields are filled.
    A hospital is skipped when a row with the same name already exists
    in the MySQL table ``yd_hospital``.
    """

    name = 'hospital'
    # NOTE(review): allowed_domains does not match the crawled host
    # (yyk.99.com.cn); requests only get through because every Request
    # is issued with dont_filter=True. Confirm '99.com.cn' was intended.
    allowed_domains = ['juzizhou.net']
    start_urls = ['http://yyk.99.com.cn/city.html']
    # NOTE(review): this class-level item is shared by all instances and is
    # never read — each callback builds its own item. Kept only so any
    # external access to hospitalSpider.item keeps working.
    item = HospitalinfoItem()

    def parse(self, response):
        """Parse the area index page and follow every area link.

        :param response: response for http://yyk.99.com.cn/city.html
        :return: yields one Request per area, handled by city_page
        """
        areas = response.xpath("//div[@id='areacontent']/ul/li/a/text()").extract()
        print(areas)
        links = response.xpath("//div[@id='areacontent']/ul/li/a/@href").extract()
        # zip() pairs each area name with its (relative) link; avoids index
        # arithmetic and tolerates a length mismatch between the two lists.
        for area, link in zip(areas, links):
            url = 'http://yyk.99.com.cn' + link
            print(url)
            yield Request(url=url, meta={'industry': area},
                          callback=self.city_page, dont_filter=True)

    def city_page(self, response):
        """Parse an area page and follow each city link to its hospital list.

        :param response: response for one area page
        :return: yields one Request per city, handled by hospital_list
        """
        cities = response.xpath("//div[@class='fontlist']/ul/li/a/text()").extract()
        print(cities)
        links = response.xpath("//div[@class='fontlist']/ul/li/a/@href").extract()
        for city, url in zip(cities, links):
            print(url)
            yield Request(url=url, meta={'industry': city},
                          callback=self.hospital_list, dont_filter=True)

    def hospital_list(self, response):
        """Hospital list page: pre-fill location fields, follow detail pages.

        :param response: response for one city's hospital list page
        :return: yields one Request per hospital, handled by hospital_info
        """
        hospitals = response.xpath("//div[@class='tablist']/ul/li/a/span/text()").extract()
        # Breadcrumb minus the leading "home" entry. Two entries means a
        # municipality (city == province level), three means
        # province / city / district.
        positions = response.xpath("//div[@class='position']/a/text()").extract()[1:]
        print(hospitals)
        links = response.xpath("//div[@class='tablist']/ul/li/a/@href").extract()
        if len(positions) < 2:
            # Unexpected breadcrumb shape: pad with empty strings so the
            # lookups below never raise IndexError (previously crashed).
            positions = positions + [''] * (2 - len(positions))
        for hospital, link in zip(hospitals, links):
            url = link + 'jianjie.html'
            item = HospitalinfoItem()
            item['province'] = positions[0]
            item['city'] = positions[0] if len(positions) == 2 else positions[1]
            item['district'] = positions[1] if len(positions) == 2 else positions[2]
            yield Request(url=url, meta={'industry': hospital, 'item': item},
                          callback=self.hospital_info, dont_filter=True)

    def hospital_info(self, response):
        """Hospital detail page: fill the remaining item fields and yield it.

        Skips the hospital when a ``yd_hospital`` row with the same name
        already exists.

        :param response: response for one hospital's "jianjie" page
        :return: yields the populated HospitalinfoItem, or nothing
        """
        name = response.xpath(u"//div[@class='hpi_content clearbox']/ul/li/span/text()").extract()[0]
        # config = {'host': '127.0.0.1', 'user': 'root', 'password': '123456', 'db': 'hospital',
        #           'charset': 'utf8mb4'}
        # HACK: live credentials hard-coded in source — move to Scrapy
        # settings or environment variables.
        config = {'host': 'rm-bp1r11ynaceh2h2p5-rm-bp1r11ynaceh2h2p5788.mysql.rds.aliyuncs.com', 'user': 'root', 'password': 'LEMON521$', 'db': 'hospital',
                  'charset': 'utf8mb4'}
        connection = pymysql.connect(**config)
        already_exist = 0
        try:
            # Parameterized query: the previous string-concatenated SQL was
            # injectable through a crafted hospital name. cursor.execute()
            # returns the number of matching rows, like connection.query().
            with connection.cursor() as cursor:
                already_exist = cursor.execute(
                    "select * from yd_hospital where name = %s", (name,))
        except Exception as err:
            # Best-effort dedup check: on DB failure treat the hospital as
            # new (already_exist stays 0) rather than aborting the crawl.
            print(err)
            log.info(err)
        finally:
            # Previously the connection leaked — one open connection per
            # detail page crawled.
            connection.close()
        print('result' + str(already_exist))
        if already_exist != 0:
            # Already in the database: nothing to yield.
            return
        item = response.meta['item']
        item['name'] = response.xpath(u"//div[@class='hpi_content clearbox']/ul/li/span/text()").extract()[0]
        item['alias_name'] = response.xpath(
            u'normalize-space(string(.//*[text()="医院别名："]/following-sibling::node()[1]))').extract_first()
        item['property'] = response.xpath(
            u'normalize-space(string(.//*[text()="医院性质："]/following-sibling::node()[1]))').extract_first()
        item['grade'] = response.xpath(
            u'normalize-space(string(.//*[text()="医院等级："]/following-sibling::node()[1]))').extract_first()
        item['tel'] = response.xpath(
            u'normalize-space(string(.//*[text()="联系电话："]/following-sibling::node()[1]))').extract_first()
        item['address'] = response.xpath(
            u'normalize-space(string(.//*[text()="联系地址："]/following-sibling::node()[1]))').extract_first()
        # Fixed table-cell positions in the basic-info table; fragile if the
        # page layout changes.
        dean = response.xpath(u"//div[@class='leftpad10 hpbasicinfo']/table/tr[2]/td[2]/text()").extract_first()
        created_time = response.xpath(u"//div[@class='leftpad10 hpbasicinfo']/table/tr[2]/td[4]/text()").extract_first()
        is_health_insurance = response.xpath(
            u"//div[@class='leftpad10 hpbasicinfo']/table/tr[4]/td[6]/text()").extract_first()
        bus_route_data = response.xpath(u"//div[@class='leftpad10 contact']/table")
        bus_route = bus_route_data.xpath(u"//td[@class='lasttdr lasttd']/text()").extract_first()
        url = response.xpath(u"//div[@class='leftpad10 contact']/table/tr[1]/td[2]/a/@href").extract_first()
        advanced_equipment_data = response.xpath(u"//div[@class='mainleft']/div[3]/div[@class='hpcontent']")
        advanced_equipment = advanced_equipment_data.xpath(u"normalize-space(string(.))").extract_first()
        summary_data = response.xpath(u"//div[@class='mainleft']/div[4]/div[@class='hpcontent']")
        summary = summary_data.xpath(u"normalize-space(string(.))").extract_first()
        honor_data = response.xpath(u"//div[@class='mainleft']/div[5]/div[@class='hpcontent']")
        honor = honor_data.xpath(u"normalize-space(string(.))").extract_first()
        # Normalize optional fields: strip newlines/whitespace, default to ''.
        item['dean'] = dean.replace("\n", "").strip() if dean else ''
        item['created_time'] = created_time.replace("\n", "").strip() if created_time else ''
        item['advanced_equipment'] = advanced_equipment if advanced_equipment else ''
        item['summary'] = summary if summary else ''
        item['website'] = url.replace("\n", "").strip() if url else ''
        item['bus_route'] = bus_route.replace("\n", "").strip() if bus_route else ''
        item['honor'] = honor if honor else ''
        item['is_health_insurance'] = is_health_insurance.replace("\n", "").strip() if is_health_insurance else ''
        print(item)
        yield item
