# #!/usr/bin/env python
# # -*- coding:utf-8 -*-
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
# import scrapy
# # from items import HospitalItem
# from demo.items import HospitalItem
# import re
# import requests
# from lxml import etree
#
#
# class LinyihospitalSpider(scrapy.Spider):
#     name = 'linyi_hospital'
#     allowed_domains = []
#     start_urls = ['http://yiyuan.999ask.com/areasearch/']
#
#     def parse(self, response):
#         province_list = response.xpath('//ul[@id="chaxun_lb"]/li/p/a/@href').extract()
#         for province in province_list:
#             province_url = 'http://yiyuan.999ask.com' + province
#             # print(province_url)
#             yield scrapy.Request(url=province_url, callback=self.parse_hospital_link)
#             # break
#
#     def parse_hospital_link(self, response):
#         hospital_links = []
#         page = 0
#         while 1:
#             page += 1
#             url = response.url.replace('1.html', '{}.html'.format(str(page)))
#             headers = {
#                 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}
#             htm = requests.get(url, headers=headers).text
#             html = etree.HTML(htm)
#             hospital_link = html.xpath(
#                 '/html/body/div[@class="main"]/div[@class="chaxun_right"]/div[@class="chaxun_r2"]/div[@class="chaxun_r2_2"]/div[@class="lbzs_nr"]/div[@class="lbzs_nr1"]/p/a/@href')
#             if len(hospital_link) > 0:
#                 for link in hospital_link:
#                     hospital_links.append('http://yiyuan.999ask.com{}'.format(str(link)))
#             else:
#                 break
#         for hospital_url in hospital_links:
#             # print(hospital_url)
#             yield scrapy.Request(url=hospital_url, callback=self.parse_hospital_info)
#             # break
#
#     def parse_hospital_info(self, response):
#         item = HospitalItem()
#         item['hospital_link'] = response.url
#         try:
#             item['hospital_name'] = response.xpath('//div[@class="fly_navHd"]/text()').extract()[0]
#         except:
#             item['hospital_name'] = None
#         if item['hospital_name'] == None:
#             pass
#         else:
#             try:
#                 item['hospital_address'] = response.xpath('//div[@class="fly_index_yydz"]/text()').extract()[0]
#             except:
#                 item['hospital_address'] = None
#             try:
#                 item['hospital_phone'] = response.xpath('//div[@class="fly_index_yydz"]/text()').extract()[0]
#             except:
#                 item['hospital_phone'] = None
#             # try:
#             #     item['hospital_grade'] = response.xpath('//span[@class="navDj_m"]/text()').extract()[0].split('\xa0')[0]
#             # except IndexError:
#             #     item['hospital_grade'] = None
#             # try:
#             #     item['hospital_type'] = response.xpath('//span[@class="navDj_m"]/text()').extract()[0].decode('utf8').split('\xa0')[
#             #         -1].replace(item['hospital_grade'], '')
#             # except IndexError:
#             #     item['hospital_type'] = None
#             # item['hospital_introduction'] = ''.join(etree.HTML(requests.get(response.url + 'introduction/').text).xpath(
#             #     '//div[@class="fly_yygkCon"]/p/text()'))
#             # item['hospital_department'] = ', '.join(
#             #     response.xpath('//div[@class="qbks_three_con"]/ul/li/a/text()').extract())
#             return item
#             print(item)


# -*- coding: utf-8 -*-
# import scrapy
# import requests
from lxml import etree
from scrapy.http import Request
from crawling.spiders.lxmlhtml import CustomLxmlLinkExtractor as LinkExtractor

from crawling.items import HospitalItem
from crawling.spiders.redis_spider import RedisSpider

class LinyihospitalSpider(RedisSpider):
    """Crawl hospital listings from yiyuan.999ask.com.

    Flow: province index -> per-province listing pages (paged, fetched
    synchronously) -> per-hospital detail pages parsed into HospitalItem.
    """

    name = 'linyi_hospital'

    allowed_domains = []
    start_urls = ['http://yiyuan.999ask.com/areasearch/']

    # User-Agent for the synchronous (non-Scrapy) page fetches below.
    _HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}

    @staticmethod
    def _fetch(url, headers=None):
        """Fetch *url* synchronously and return the raw response body (bytes).

        BUG FIX: the original called ``Request.get(url, ...)`` --
        ``scrapy.http.Request`` has no ``get`` method, so every listing fetch
        raised AttributeError. Replaced with a stdlib urllib fetch (the
        commented-out predecessor of this spider used ``requests.get``).
        """
        from urllib.request import Request as HttpRequest, urlopen
        with urlopen(HttpRequest(url, headers=headers or {})) as resp:
            return resp.read()

    # Province index: schedule one request per province listing page.
    def parse(self, response):
        """Parse the province list and yield a Request per province URL."""
        self._logger.debug("crawled url {}".format(response.request.url))
        province_list = response.xpath('//ul[@id="chaxun_lb"]/li/p/a/@href').extract()
        for province in province_list:
            province_url = 'http://yiyuan.999ask.com' + province
            yield Request(url=province_url, callback=self.parse_hospital_link)

    # Listing pages: collect every hospital detail link for one province.
    def parse_hospital_link(self, response):
        """Walk a province's listing pages and yield a Request per hospital.

        NOTE(review): paging assumes ``response.url`` ends in ``1.html``;
        if it does not, ``replace`` is a no-op and every iteration refetches
        the same page -- confirm against the site's URL scheme.
        """
        hospital_links = []
        page = 0
        while True:
            page += 1
            url = response.url.replace('1.html', '{}.html'.format(page))
            html = etree.HTML(self._fetch(url, headers=self._HEADERS))
            hospital_link = html.xpath(
                '/html/body/div[@class="main"]/div[@class="chaxun_right"]/div[@class="chaxun_r2"]/div[@class="chaxun_r2_2"]/div[@class="lbzs_nr"]/div[@class="lbzs_nr1"]/p/a/@href')
            # An empty page marks the end of the listing.
            if not hospital_link:
                break
            hospital_links.extend(
                'http://yiyuan.999ask.com{}'.format(link) for link in hospital_link)
        for hospital_url in hospital_links:
            yield Request(url=hospital_url, callback=self.parse_hospital_info)

    # Detail page: extract one hospital's fields.
    def parse_hospital_info(self, response):
        """Parse a hospital detail page into a HospitalItem.

        Returns None (skips the page) when no hospital name is present.
        Fields that are missing on the page are set to None.
        """
        item = HospitalItem()
        # Hospital URL
        item['hospital_link'] = response.url
        # Hospital name -- required; bail out early if absent.
        try:
            item['hospital_name'] = response.xpath('//div[@class="fly_navHd"]/text()').extract()[0]
        except IndexError:
            return None
        # Hospital address
        try:
            item['hospital_address'] = response.xpath('//div[@class="fly_index_yydz"]/text()').extract()[0]
        except IndexError:
            item['hospital_address'] = None
        # Hospital phone.
        # NOTE(review): this XPath is identical to the address XPath above --
        # looks like a copy-paste slip; confirm the correct phone selector.
        try:
            item['hospital_phone'] = response.xpath('//div[@class="fly_index_yydz"]/text()').extract()[0]
        except IndexError:
            item['hospital_phone'] = None
        # Hospital grade: first \xa0-separated token of the badge text.
        try:
            item['hospital_grade'] = response.xpath('//span[@class="navDj_m"]/text()').extract()[0].split(u'\xa0')[0]
        except IndexError:
            item['hospital_grade'] = None
        # Hospital type: last token with the grade text stripped out.
        try:
            raw_type = response.xpath('//span[@class="navDj_m"]/text()').extract()[0].split(u'\xa0')[-1]
            item['hospital_type'] = raw_type.replace(item['hospital_grade'], '')
        except IndexError:
            item['hospital_type'] = None
        # Hospital introduction: fetched from the /introduction/ sub-page
        # (same Request.get bug fixed -- see _fetch).
        item['hospital_introduction'] = ''.join(
            etree.HTML(self._fetch(response.url + 'introduction/', headers=self._HEADERS)).xpath(
                '//div[@class="fly_yygkCon"]/p/text()'))
        # Hospital departments
        item['hospital_department'] = ', '.join(
            response.xpath('//div[@class="qbks_three_con"]/ul/li/a/text()').extract())
        # BUG FIX: the original printed the item and never returned it, so it
        # never reached the item pipelines.
        return item
