# coding:utf-8

from scrapy.contrib.spiders import CrawlSpider
from ..items import House365Item
import bs4
import scrapy


class House365Spider(CrawlSpider):
    """Crawl house365.com (365地产): homepage cities -> per-city areas ->
    estate listings (with pagination) -> per-estate detail page, yielding
    one House365Item per estate carrying its secondary project id.
    """

    name = 'house365spider'
    allowed_domains = ['house365.com']
    start_urls = ['http://www.house365.com/']

    def parse(self, response):
        """Extract the city links from the homepage and queue one
        new-house channel request per city.

        Yields scrapy.Request objects handled by parse_city_area.
        """
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for link in soup.select('.allcitys-detail > a'):
            city = link.get_text()
            href = link.get('href')
            # The city id is the subdomain of the city link, e.g.
            # http://nj.house365.com/index.html -> 'nj'.
            city_id = (href.replace('http://', '')
                           .replace('.house365.com/', '')
                           .replace('index.html', ''))
            site_url = 'http://newhouse.' + city_id + '.house365.com/house/channel-1'
            self.logger.info('city=%s city_id=%s url=%s', city, city_id, site_url)
            meta = {
                'website': '365地产', 'web_url': 'house365.com',
                'city': city, 'city_id': city_id,
            }
            yield scrapy.Request(site_url, callback=self.parse_city_area, meta=meta)

    def parse_city_area(self, response):
        """Extract the area filter links of a city's new-house channel
        and queue one estate-list request per area (skipping the
        '全部' / "all" pseudo-filter).
        """
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # The area filter lives in the SECOND '.w920' box; guard the
        # index explicitly (the original truthiness check still raised
        # IndexError when only one box was present).
        boxes = soup.select('.w920')
        if len(boxes) < 2:
            return
        for link in boxes[1].select('a'):
            area = link.get_text()
            if area == '全部':
                continue
            # Build a fresh meta per request instead of mutating the
            # shared dict in the loop.
            area_meta = dict(meta, area=area)
            yield scrapy.Request(link.get('href'),
                                 callback=self.parse_city_estate,
                                 meta=area_meta)

    def parse_city_estate(self, response):
        """Extract the estates on one listing page, queue a detail-page
        request per estate, and follow the '下一页' (next page) link.
        """
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for title in soup.select('.tit > h3'):
            link = title.a
            estate_url = link.get('href')
            estate_meta = dict(
                meta,
                estate=link.get_text(),
                estate_url=estate_url,
                # Estate URLs end in .../<estate_id>/ — take the
                # second-to-last path segment.
                estate_id=estate_url.split('/')[-2],
            )
            yield scrapy.Request(estate_url,
                                 callback=self.parse_get_estate_id2,
                                 meta=estate_meta)

        # Pagination: when more pages exist, the third-from-last pager
        # link reads '下一页'. Guard the length so a short pager does
        # not raise IndexError.
        pager = soup.select('.pageList > a')
        if len(pager) >= 3:
            next_link = pager[-3]
            if next_link.get_text().strip() == '下一页':
                yield scrapy.Request(next_link.get('href'),
                                     callback=self.parse_city_estate,
                                     meta=meta)

    def parse_get_estate_id2(self, response):
        """Extract the secondary project id (estate_id2) from the estate
        detail page and emit the assembled House365Item.
        """
        meta = response.meta
        # html5lib: more tolerant parser for the detail pages' markup.
        soup = bs4.BeautifulSoup(response.body, 'html5lib')

        prj_input = soup.find('input', class_='prj_id')
        if prj_input is not None:
            item = House365Item()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = meta['area']
            item['estate'] = meta['estate']
            item['estate_id'] = meta['estate_id']
            item['estate_url'] = meta['estate_url']
            item['estate_id2'] = prj_input.get('value')
            yield item
