#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: root
@file: linyi_spider.py
@time: 2018/08/{DAY}
"""

from __future__ import absolute_import

from crawling.spiders.lxmlhtml import CustomLxmlLinkExtractor as LinkExtractor
from scrapy.conf import settings
import scrapy
from crawling.items import RawResponseItem
from crawling.items import HospitalItem
from crawling.spiders.redis_spider import RedisSpider
import requests
from lxml import etree

class LinyiSpider(RedisSpider):
    '''
    Spider for yiyuan.999ask.com.

    Flow: ``parse`` reads the province links off the area-search page,
    ``parse_hospital_link`` walks each province's numbered pagination to
    collect hospital detail URLs, and ``parse_hospital_info`` scrapes one
    hospital page into a HospitalItem.
    '''
    name = "link"
    # name = 'linyi_hospital'

    # allowed_domains = []
    # start_urls = ['http://yiyuan.999ask.com/areasearch/']

    def __init__(self, *args, **kwargs):
        super(LinyiSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        """Queue one request per province found on the area-search page."""
        province_list = response.xpath('//ul[@id="chaxun_lb"]/li/p/a/@href').extract()
        for province in province_list:
            # hrefs are site-relative; prefix the host.
            province_url = 'http://yiyuan.999ask.com' + province
            self.logger.info(province_url)
            yield scrapy.Request(url=province_url, callback=self.parse_hospital_link)

    def parse_hospital_link(self, response):
        """Walk a province's pagination and queue each hospital detail page.

        NOTE(review): pages are fetched synchronously with ``requests``,
        bypassing Scrapy's scheduler, middleware, and throttling — kept
        as-is to preserve behavior, but consider yielding Requests instead.
        """
        self.logger.info(response)

        hospital_links = []
        page = 0
        # Loop-invariant: build the headers dict once, not per page.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'}
        while True:
            page += 1
            # Province URLs end in '1.html'; substitute the page number.
            url = response.url.replace('1.html', '{}.html'.format(page))
            htm = requests.get(url, headers=headers).text
            html = etree.HTML(htm)
            hospital_link = html.xpath(
                '/html/body/div[@class="main"]/div[@class="chaxun_right"]/div[@class="chaxun_r2"]/div[@class="chaxun_r2_2"]/div[@class="lbzs_nr"]/div[@class="lbzs_nr1"]/p/a/@href')
            if not hospital_link:
                # An empty result page means we ran past the last page.
                break
            for link in hospital_link:
                hospital_links.append('http://yiyuan.999ask.com{}'.format(link))
        for hospital_url in hospital_links:
            self.logger.info(hospital_url)
            yield scrapy.Request(url=hospital_url, callback=self.parse_hospital_info)

    def parse_hospital_info(self, response):
        """Scrape one hospital detail page into a HospitalItem and yield it.

        Pages with no recognizable hospital name are skipped silently.
        """
        item = HospitalItem()
        # Hospital URL
        item['hospital_link'] = response.url
        # Hospital name
        item['hospital_name'] = response.xpath('//div[@class="fly_navHd"]/text()').extract_first()

        # Guard clause: nothing to scrape if the name element is absent.
        if item['hospital_name'] is None:
            return
        # Hospital address.
        # BUG FIX: `extract_first` was referenced without calling it, which
        # stored the bound method object instead of the extracted text.
        item['hospital_address'] = response.xpath('//div[@class="fly_index_yydz"]/text()').extract_first()
        # Hospital phone.
        # NOTE(review): same XPath as the address field — presumably the
        # phone lives in a different node; verify against the live page.
        item['hospital_phone'] = response.xpath('//div[@class="fly_index_yydz"]/text()').extract_first()
        # Hospital grade: first NBSP-separated token of the navDj_m span.
        try:
            item['hospital_grade'] = \
                response.xpath('//span[@class="navDj_m"]/text()').extract()[0].split(u'\xa0')[0]
        except IndexError:
            item['hospital_grade'] = None
        # Hospital type: last NBSP-separated token, with the grade text
        # stripped out. If the span was missing above, the same IndexError
        # fires here and both fields end up None.
        try:
            item['hospital_type'] = \
                response.xpath('//span[@class="navDj_m"]/text()').extract()[0].split(u'\xa0')[
                    -1].replace(item['hospital_grade'], '')
        except IndexError:
            item['hospital_type'] = None
        # Hospital introduction: fetched synchronously from the
        # 'introduction/' sub-page of the same hospital URL.
        item['hospital_introduction'] = ''.join(
            etree.HTML(requests.get(response.url + 'introduction/').text).xpath(
                '//div[@class="fly_yygkCon"]/p/text()'))
        # Hospital departments, joined into a single comma-separated string.
        item['hospital_department'] = ', '.join(
            response.xpath('//div[@class="qbks_three_con"]/ul/li/a/text()').extract())
        yield item