# -*- coding:UTF-8 -*-
import scrapy
from scrapy.http import Request, HtmlResponse
from urllib.parse import urlparse
from Lactationer_Master.util.redisClient import redisClient

class Lactationer_Master(scrapy.Spider):
    """Spider that crawls ganji.com's city index for lactation-consultant
    (催乳师) service listings and stores discovered URLs in Redis.

    Flow: ``parse`` fans out one search request per city, then
    ``parse_citys`` collects detail-page URLs and follows pagination.
    """

    name = 'Lactationer_Master'
    # BUG FIX: Scrapy reads the attribute ``allowed_domains``; the original
    # ``allow_domain`` was silently ignored, so offsite filtering never
    # applied to this spider.
    allowed_domains = ['ganji.com']
    start_urls = ['http://www.ganji.com/index.htm']

    def parse(self, response):
        """Extract every city link from the index page, record it in Redis,
        and schedule a per-city search request with the keyword 催乳师
        (URL-encoded) appended.

        :param response: the downloaded city-index page.
        :yields: one ``scrapy.Request`` per city, handled by ``parse_citys``.
        """
        print('-------------------start project-------------------------')
        city_urls = response.xpath('//div[@class="all-city"]//a/@href').extract()
        # One Redis client suffices for the whole page (hoisted out of the loop).
        r = redisClient()
        for city_url in city_urls:
            r.setCitys(city_url)
            # Append the category path plus the search keyword query string.
            city_url += 'jiazhengcuiru/?original=%E5%82%AC%E4%B9%B3%E5%B8%88&websearchkw=%E5%82%AC%E4%B9%B3%E5%B8%88'
            yield scrapy.Request(url=city_url, callback=self.parse_citys)

    def parse_citys(self, response):
        """Parse one search-results page: push detail-page URLs to Redis and
        follow the "next page" link back into this callback.

        Two href styles appear in the listing:
          * ``//anshan.ganji.com/wuba_info/...`` (data mirrored from 58.com)
            — needs an ``http:`` scheme prefix and is sent straight to the
            contact-info tab;
          * ``/fuwu_dian/.../jiazhengcuiru/`` (ganji's own data) — a relative
            path that needs the current city's domain prepended.

        :param response: a downloaded search-results (or redirect) page.
        :yields: a retry ``Request`` when an anti-crawler redirect is
                 detected, otherwise the next-page ``Request`` (if any).
        """
        detail_urls = response.xpath('//div[@class="list"]/ul//a[contains(@class,"list-info-title")]/@href').extract()
        parsed_uri = urlparse(response.url)
        domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
        # Ganji's anti-crawler 302 lands on a URL containing 'callback':
        # re-request the original URL (proxy rotation is expected to happen
        # in the downloader middleware) instead of parsing the decoy page.
        if 'callback' in response.url:
            redirect_url = response.meta['redirect_urls'][0]
            yield scrapy.Request(url=redirect_url, meta={'download_timeout': 10}, callback=self.parse_citys)
        else:
            # One Redis client for all detail URLs on this page.
            r = redisClient()
            for detail_url in detail_urls:
                if 'wuba_info' in detail_url:
                    # 58.com records: scheme-relative href; jump directly to
                    # the contact-information tab.
                    detail_url = 'http:' + detail_url + 'contactus/#tabl'
                else:
                    # Ganji's own records: relative path, prepend city domain.
                    detail_url = domain + detail_url
                r.setCityUrls(detail_url)
            # Follow pagination, if a "next" link exists.
            next_page = response.xpath('//a[@class="next"]/@href').extract()
            if next_page:
                yield Request(domain + next_page[0], callback=self.parse_citys)