# coding:utf-8

import scrapy
import bs4
from scrapy.contrib.spiders import CrawlSpider
from ..items import FangddItem


class FangddSpider(CrawlSpider):
    """Spider for fangdd.com (房多多) new-house listings.

    ``parse`` seeds one request per city from a hard-coded snapshot of the
    site's city-list markup; ``parse_city_estate`` yields one ``FangddItem``
    per estate on a city listing page and follows the ">" pagination link.

    NOTE(review): ``parse`` is overridden on a ``CrawlSpider`` that defines
    no ``rules``, so the CrawlSpider rule machinery is never used; a plain
    ``scrapy.Spider`` base would behave identically. Also, the file imports
    from ``scrapy.contrib.spiders``, a path removed in Scrapy >= 1.x —
    confirm the pinned Scrapy version before upgrading.
    """

    name = 'fangddspider'
    # Subdomains (www./xf.) of an allowed domain are accepted by Scrapy's
    # offsite middleware, so xf.fangdd.com requests below are permitted.
    allowed_domains = ['fangdd.com']
    start_urls = ['http://www.fangdd.com/']

    def parse(self, response):
        """Yield one Request per city, carrying the city fields via meta.

        The city list is a hard-coded snapshot of the site's markup rather
        than being parsed from ``response`` (presumably because the live
        homepage renders the list client-side — TODO confirm).
        """
        data = '''<div class="city-list" >
                                    <a data-city-id="121" href="/shanghai">上海</a>
                                    <a data-city-id="3" href="/suzhou">苏州</a>
                                    <a data-city-id="2316" href="/hangzhou">杭州</a>
                                    <a data-city-id="267" href="/nanjing">南京</a>
                                    <a data-city-id="621" href="/yangzhou">扬州</a>
                                    <a data-city-id="1230" href="/qingdao">青岛</a>
                                    <a data-city-id="2567" href="/ningbo">宁波</a>
                                    <a data-city-id="2759" href="/jinan">济南</a>
                                    <a data-city-id="13" href="/wuxi">无锡</a>
                                    <a data-city-id="8358" href="/fuzhou">福州</a>
                                    <a data-city-id="9393" href="/xiamen">厦门</a>
                                    <a data-city-id="9915" href="/linyi">临沂</a>
                                    <a data-city-id="11760" href="/lianyungang">连云港</a>
                                    <a data-city-id="10575" href="/huzhou">湖州</a>
                                    <a data-city-id="12197" href="/zhangzhou">漳州</a>
                                    <a data-city-id="5159" href="/taizhou">泰州</a>
                                    <a data-city-id="576" href="/hefei">合肥</a>
                                    <a data-city-id="8815" href="/quanzhou">泉州</a>

                                    <a data-city-id="852" href="/guangzhou">广州</a>
                                    <a data-city-id="1337" href="/shenzhen">深圳</a>
                                    <a data-city-id="2099" href="/foshan">佛山</a>
                                    <a data-city-id="10668" href="/haikou">海口</a>
                                    <a data-city-id="1492" href="/huizhou">惠州</a>
                                    <a data-city-id="1406" href="/dongguan">东莞</a>
                                    <a data-city-id="100265" href="/zhaoqing">肇庆</a>

                                    <a data-city-id="339" href="/chongqing">重庆</a>
                                    <a data-city-id="788" href="/kunming">昆明</a>
                                    <a data-city-id="753" href="/guiyang">贵阳</a>
                                    <a data-city-id="450" href="/chengdu">成都</a>
                                    <a data-city-id="100047" href="/luzhou">泸州</a>

                                    <a data-city-id="619" href="/wuhan">武汉</a>
                                    <a data-city-id="8660" href="/luoyang">洛阳</a>
                                    <a data-city-id="112303" href="/huangshi">黄石</a>
                                    <a data-city-id="10256" href="/xiangyang">襄阳</a>
                                    <a data-city-id="9528" href="/yichang">宜昌</a>
                                    <a data-city-id="2127" href="/changsha">长沙</a>
                                    <a data-city-id="100298" href="/xinyang">信阳</a>

                                    <a data-city-id="1145" href="/xian">西安</a>

                                    <a data-city-id="2179" href="/beijing">北京</a>
                                    <a data-city-id="9119" href="/taiyuan">太原</a>
                                    <a data-city-id="2323" href="/tianjin">天津</a>

                    </div>'''
        soup = bs4.BeautifulSoup(data, 'lxml')

        # One <a> per city inside the .city-list container.
        for child in soup.select('.city-list > a'):
            href = child.get('href')
            if not href:
                # Defensive: a missing href would make the '+' below
                # raise TypeError; skip such anchors instead.
                continue
            city = child.get_text()
            city_url = 'http://xf.fangdd.com' + href
            # NOTE(review): "city_id" here is the URL slug (e.g.
            # "shanghai"), NOT the numeric data-city-id attribute present
            # in the markup. Kept as-is so downstream consumers of the
            # item's city_id field see unchanged values — confirm intent.
            city_id = href.replace('/', '')
            citys = {
                'website': '房多多', 'web_url': 'fangdd.com',
                'city': city, 'city_id': city_id
            }
            yield scrapy.Request(city_url, callback=self.parse_city_estate, meta=citys)

    def parse_city_estate(self, response):
        """Parse one city listing page: yield estate items, then paginate.

        ``response.meta`` carries the city fields set in ``parse`` and is
        forwarded unchanged to subsequent result pages.
        """
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # Each <ul> under .detail describes one estate: the first <li>
        # holds the estate name/link, the second the district (区域).
        for child in soup.select('.detail > ul'):
            cells = child.find_all('li', limit=2)
            # Defensive: skip malformed entries (ads/placeholders) that
            # lack the two expected <li> cells or a linked estate anchor —
            # the lookups below would otherwise raise IndexError or
            # AttributeError and abort the whole page.
            if len(cells) < 2 or cells[0].a is None or cells[0].a.get('href') is None:
                continue
            estate = cells[0].get_text().strip().split(' ')[0].strip()
            estate_id = cells[0].a.get('data-house-id')
            # Drop the query string; keep only the canonical estate path.
            estate_url = 'http://xf.fangdd.com' + cells[0].a.get('href').split('?')[0]
            area = cells[1].get_text().strip().split(' ')[0].strip()
            item = FangddItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = area
            item['estate'] = estate
            item['estate_id'] = estate_id
            item['estate_url'] = estate_url
            yield item

        # Pagination: when a next page exists the last .xf-step anchor is
        # the ">" link whose href points at the next results page.
        pages = soup.select('.xf-step > a')
        if pages and pages[-1].get_text() == '>' and pages[-1].get('href'):
            next_url = 'http://xf.fangdd.com' + pages[-1].get('href')
            yield scrapy.Request(next_url, callback=self.parse_city_estate, meta=meta)
