# coding: utf-8

import bs4
from scrapy.contrib.spiders.crawl import CrawlSpider
import scrapy
from ..items import ZhifangItem


class ZhifangSpider(CrawlSpider):
    """Spider for zhifang.com (智房网) new-home listings.

    Crawl flow: hard-coded province links -> province page (city links)
    -> per-city estate listing pages (with pagination) -> individual
    estate pages, where the estate's district is read from the breadcrumb
    and a ZhifangItem is yielded.

    NOTE(review): this class overrides ``parse`` although it inherits from
    CrawlSpider, which disables the rule-based crawling machinery. Since no
    ``rules`` are defined this happens to work, but plain ``scrapy.Spider``
    would express the intent better — confirm before changing the base class.
    """

    name = 'zhifangspider'
    allowed_domains = ['zhifang.com']
    start_urls = ['http://www.zhifang.com/xinpan/']

    # Municipalities / regions whose province page lists estates directly,
    # with no intermediate city level.
    _DIRECT_CITIES = frozenset(['北京', '香港', '天津', '上海', '重庆', '澳门', '海南'])

    def parse(self, response):
        """Seed one request per province from a hard-coded link fragment.

        The province anchors are embedded as a static HTML snippet rather
        than scraped from ``response`` — presumably the live page markup was
        unstable when this was written; TODO confirm whether they can be
        scraped from ``response`` instead.
        """
        data = '''<p>
            <a href="/project/03zj000000000000000.html">浙江</a>
            <a href="/project/06yn000000000000000.html">云南</a>
            <a href="/project/07xj000000000000000.html">新疆</a>
            <a href="/project/05hk000000000000000.html">香港</a>
            <a href="/project/06xz000000000000000.html">西藏</a>
            <a href="/project/01tj000000000000000.html">天津</a>
            <a href="/project/05tw000000000000000.html">台湾</a>
            <a href="/project/06sc000000000000000.html">四川</a>
            <a href="/project/03sh000000000000000.html">上海</a>
            <a href="/project/07xi000000000000000.html">陕西</a>
            <a href="/project/01sx000000000000000.html">山西</a>
            <a href="/project/03sd000000000000000.html">山东</a>
            <a href="/project/07qh000000000000000.html">青海</a>
            <a href="/project/07nx000000000000000.html">宁夏</a>
            <a href="/project/01nm000000000000000.html">内蒙古</a>
            <a href="/project/02ln000000000000000.html">辽宁</a>
            <a href="/project/03jx000000000000000.html">江西</a>
            <a href="/project/03js000000000000000.html">江苏</a>
            <a href="/project/02jl000000000000000.html">吉林</a>
            <a href="/project/04hu000000000000000.html">湖南</a>
            <a href="/project/04hi000000000000000.html">湖北</a>
            <a href="/project/02hl000000000000000.html">黑龙江</a>
            <a href="/project/04he000000000000000.html">河南</a>
            <a href="/project/01hb000000000000000.html">河北</a>
            <a href="/project/05hn000000000000000.html">海南</a>
            <a href="/project/06gz000000000000000.html">贵州</a>
            <a href="/project/05gx000000000000000.html">广西</a>
            <a href="/project/05gd000000000000000.html">广东</a>
            <a href="/project/07gs000000000000000.html">甘肃</a>
            <a href="/project/03fj000000000000000.html">福建</a>
            <a href="/project/06cq000000000000000.html">重庆</a>
            <a href="/project/01bj000000000000000.html">北京</a>
            <a href="/project/05mo000000000000000.html">澳门</a>
            <a href="/project/03ah000000000000000.html">安徽</a>            </p>'''
        soup = bs4.BeautifulSoup(data, 'lxml')

        #
        # Collect all provinces.
        #

        for anchor in soup.select('p > a'):
            href = anchor.get('href')
            province = anchor.get_text()
            # e.g. '/project/03zj000000000000000.html' -> '03zj'
            province_id = href.split('/')[-1].replace('000000000000000.html', '')
            province_url = 'http://www.zhifang.com' + href
            meta = {
                'website': '智房网', 'web_url': 'http://www.zhifang.com/',
                'province': province, 'city_id': province_id,
                'province_url': province_url,
            }
            yield scrapy.Request(province_url, callback=self.parse_province_city, meta=meta)

    def parse_province_city(self, response):
        """From a province page, follow each city link.

        Municipalities have no city level, so their province page goes
        straight to the estate listing parser.
        """
        meta = response.meta
        province = meta['province']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        if province in self._DIRECT_CITIES:
            # dont_filter: the province URL was already visited to get here,
            # so the dupe filter would otherwise drop this request.
            yield scrapy.Request(meta['province_url'], callback=self.parse_city_estate,
                                 meta=dict(meta, city=province), dont_filter=True)
        else:
            citybox = soup.select('.topsearch > ul > li')[1]
            for anchor in citybox.select('a'):
                city = anchor.get_text()
                if city == '不限':
                    city = ''
                href = (anchor.get('href') or '').strip()
                if not href:
                    # BUG FIX: the old guard compared the full URL
                    # (prefix + href) to '', which was never true;
                    # the raw href is what can be empty.
                    continue
                # BUG FIX: copy meta per request — the old code mutated the
                # shared response.meta dict, so every still-pending request
                # ended up carrying the *last* city of the loop.
                yield scrapy.Request('http://www.zhifang.com' + href,
                                     callback=self.parse_city_estate,
                                     meta=dict(meta, city=city))

    def parse_city_estate(self, response):
        """Parse one estate-listing page: yield a request per estate and
        follow the '下一页' (next page) pager link when present."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        #
        # Collect estate entries.
        #

        for box in soup.select('.lpbox1 > li'):
            link = box.select('.title > h4 > a')[0]
            href = link.get('href')
            estate_url = 'http://www.zhifang.com' + href
            self.logger.debug('estate url: %s', estate_url)  # was a bare print()
            # BUG FIX: per-request meta copy (see parse_province_city) —
            # previously all estate requests shared one mutated dict.
            estate_meta = dict(meta,
                               estate=link.get_text(),
                               estate_url=estate_url,
                               estate_id=href.replace('/', ''))
            yield scrapy.Request(estate_url, callback=self.parse_get_area, meta=estate_meta)

        #
        # Pagination.
        #

        pagers = soup.select('.meneame')
        # BUG FIX: old code did select(...)[0] before checking emptiness,
        # raising IndexError on pages without a pager element.
        if pagers:
            links = pagers[0].find_all('a')
            if links:
                last = links[-1]
                if last.get_text().strip() == '下一页':
                    next_url = 'http://www.zhifang.com' + last.get('href')
                    yield scrapy.Request(next_url, callback=self.parse_city_estate,
                                         meta=dict(meta))

    def parse_get_area(self, response):
        """Parse one estate page: read the district from the breadcrumb's
        last anchor and yield the assembled ZhifangItem."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        areas = soup.select('.buildingtopnav > a')
        if areas:
            area = areas[-1].get_text()
            item = ZhifangItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = area
            item['estate'] = meta['estate']
            item['estate_id'] = meta['estate_id']
            item['estate_url'] = meta['estate_url']
            yield item
