# coding:utf-8

from scrapy.contrib.spiders import CrawlSpider
from ..items import FangwwItem
import scrapy
import bs4


class FangwwSpider(CrawlSpider):
    """Spider for fangww (ihk.cn).

    Flow: a hard-coded snapshot of the site's city navigation -> per-city
    area list -> estate listings (with pagination).  浙江 and 陕西 are
    province-level landing pages that list several cities and need an
    extra hop (``parse_province_city``); every other province link goes
    straight to its capital city's listing.
    """

    name = 'fangwwspider'
    allowed_domains = ['ihk.cn']
    start_urls = ['http://www.ihk.cn']

    # Province links whose landing page actually shows only the capital
    # city's estates; map the province name shown in the nav to that
    # capital.  浙江/陕西 are intentionally absent: they keep their name
    # and are routed through parse_province_city instead.
    _PROVINCE_CAPITAL = {
        '河南': '郑州',
        '江苏': '南京',
        '江西': '南昌',
        '安徽': '合肥',
        '湖北': '武汉',
        '湖南': '长沙',
        '内蒙古': '呼和浩特',
    }

    def parse(self, response):
        """Seed requests from a hard-coded snapshot of the city nav HTML.

        The live homepage is ignored (``response`` is unused) — the city
        list below was captured once from www.ihk.cn.
        """
        data = '''
                <div class="topbox" id="CityPan">

                            <li><a target="_blank" href="http://gz.ihk.cn">广州</a></li><li><a target="_blank" href="http://sz.ihk.cn">深圳</a></li><li><a target="_blank" href="http://dg.ihk.cn">东莞</a></li><li><a target="_blank" href="http://fs.ihk.cn">佛山</a></li>

                            <li><a target="_blank" href="http://js.ihk.cn">江苏</a></li><li><a target="_blank" href="http://sh.ihk.cn">上海</a></li><li><a target="_blank" href="http://jx.ihk.cn">江西</a></li><li><a target="_blank" href="http://zj.ihk.cn">浙江</a></li><li><a target="_blank" href="http://ah.ihk.cn">安徽</a></li>

                            <li><a target="_blank" href="http://henan.ihk.cn">河南</a></li><li><a target="_blank" href="http://hubei.ihk.cn">湖北</a></li><li><a target="_blank" href="http://hunan.ihk.cn">湖南</a></li>

                            <li><a target="_blank" href="http://tj.ihk.cn">天津</a></li><li><a target="_blank" href="http://nm.ihk.cn">内蒙古</a></li><li><a target="_blank" href="http://bj.ihk.cn">北京</a></li>

                            <li><a target="_blank" href="http://cd.ihk.cn">成都</a></li><li><a target="_blank" href="http://cq.ihk.cn">重庆</a></li>

                            <li><a target="_blank" href="http://sx.ihk.cn">陕西</a></li>

                    </div>
                            '''
        soup = bs4.BeautifulSoup(data, 'lxml')

        for link in soup.select('.topbox > li > a'):
            city = link.get_text().strip()
            href = link.get('href')
            # e.g. http://gz.ihk.cn -> city_id "gz"
            city_id = href.replace('http://', '').replace('.ihk.cn', '')
            site_url = href + '/newhouse/houselist2/'
            # Province links whose page only lists the capital's estates
            # are recorded under the capital's name.
            city = self._PROVINCE_CAPITAL.get(city, city)

            print(city, city_id, site_url)
            citys = {
                'website': '房王网', 'web_url': 'ihk.cn',
                'city': city, 'city_id': city_id, 'city_url': site_url
            }

            # 浙江/陕西 landing pages list several cities and need the
            # extra province hop; everything else goes straight to the
            # area list.
            if city in ('浙江', '陕西'):
                yield scrapy.Request(site_url, callback=self.parse_province_city, meta=citys)
            else:
                yield scrapy.Request(site_url, callback=self.parse_city_area, meta=citys)

    def parse_province_city(self, response):
        """Expand a province landing page (浙江/陕西) into per-city requests.

        The two provinces use different page structures, hence the two
        branches and the two different estate callbacks.
        """
        base = response.meta
        province = base['city']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        if province == '陕西':
            citybox = soup.select('.new_search_list > ul > li')[0]
            for link in citybox.find_all('a'):
                # Per-request meta copy: mutating response.meta in place
                # would share one dict across all yielded requests and
                # later iterations would clobber earlier ones.
                meta = dict(base, city=link.get_text(), area='')
                site_url = 'http://sx.ihk.cn' + link.get('href')
                yield scrapy.Request(site_url, callback=self.parse_city_estate, meta=meta)
        else:
            # 浙江: city links live in a different container.
            container = soup.select('.selectright')[0]
            for link in container.select('em > a'):
                meta = dict(base, city=link.get_text(), area='')
                site_url = 'http://zj.ihk.cn' + link.get('href')
                print(meta['city'], site_url)
                # BUG FIX: the original only printed here and never
                # yielded, so Zhejiang cities were never crawled even
                # though parse_city_estate2 (which hardcodes zj.ihk.cn)
                # exists exactly for these pages.
                yield scrapy.Request(site_url, callback=self.parse_city_estate2, meta=meta)

    def parse_city_area(self, response):
        """Fan a city page out into one estate-list request per area.

        Cities without an area list fall through to a single request on
        the city URL itself.
        """
        base = response.meta
        city_id = base['city_id']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        areabox = soup.select('.new_search_list > ul > li')[0]
        areas = areabox.find_all('a')
        if areas:
            for link in areas:
                # Fresh copy per request (see parse_province_city).
                meta = dict(base, area=link.get_text())
                area_url = 'http://' + city_id + '.ihk.cn' + link.get('href')
                yield scrapy.Request(area_url, callback=self.parse_city_estate, meta=meta)
        else:
            # No areas: scrape the city listing directly.  dont_filter
            # because this URL was already visited to get here.
            meta = dict(base, area='')
            yield scrapy.Request(base['city_url'], callback=self.parse_city_estate,
                                 meta=meta, dont_filter=True)

    def _make_item(self, meta, estate, estate_id, estate_url):
        """Build a FangwwItem from request meta plus estate fields."""
        item = FangwwItem()
        item['website'] = meta['website']
        item['web_url'] = meta['web_url']
        item['city'] = meta['city']
        item['city_id'] = meta['city_id']
        item['area'] = meta['area']
        item['estate'] = estate
        item['estate_id'] = estate_id
        item['estate_url'] = estate_url
        return item

    def parse_city_estate2(self, response):
        """Estate listing parser for Zhejiang pages (zj.ihk.cn layout)."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for link in soup.select('.linkblue'):
            href = link.get('href')
            yield self._make_item(
                meta,
                estate=link.get_text(),
                estate_id=href.split('/')[-1].replace('.html', ''),
                estate_url='http://zj.ihk.cn' + href,
            )

        # Pagination: follow the "next page" link, same callback.
        for page_link in soup.select('.meneame > a'):
            if page_link.get_text().strip() == '下一页':
                next_url = 'http://zj.ihk.cn' + page_link.get('href')
                yield scrapy.Request(next_url, callback=self.parse_city_estate2, meta=meta)

    def parse_city_estate(self, response):
        """Estate listing parser for the standard per-city layout."""
        meta = response.meta
        area = meta['area']
        city_id = meta['city_id']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # The site renders an explicit empty-result marker.
        if soup.select('#divNoResult'):
            print(area, '无')
            return

        for link in soup.select('.list_r01 > h1 > strong > a'):
            href = link.get('href')
            yield self._make_item(
                meta,
                estate=link.get_text().strip(),
                estate_id=href.replace('/newhouse/', '').replace('.html', ''),
                estate_url='http://' + city_id + '.ihk.cn' + href,
            )

        # Pagination: follow the "next page" link, same callback.
        # (Variable renamed from the original's `next`, which shadowed
        # the builtin.)
        for page_link in soup.select('#DivNextPage > a'):
            if page_link.get_text() == '下一页':
                next_url = 'http://' + city_id + '.ihk.cn' + page_link.get('href')
                yield scrapy.Request(next_url, callback=self.parse_city_estate, meta=meta)