# coding: utf-8

import bs4
from scrapy.contrib.spiders.crawl import CrawlSpider, Rule
import scrapy
from ..items import Tuitui99Item


class Tuitui99Spider(CrawlSpider):
    """Spider for tuitui99.com (推推99) new-house listings.

    Crawl flow:
        parse             -> one request per city's new-house index page
        parse_city_area   -> one request per district ("区域") within a city
        parse_city_estate -> one Tuitui99Item per estate, plus pagination

    NOTE(review): overriding ``parse`` on a CrawlSpider disables Rule-based
    crawling; this spider defines no rules, so that appears intentional.
    NOTE(review): the ``scrapy.contrib`` import path at the top of the file is
    deprecated in modern Scrapy (use ``scrapy.spiders``) — confirm the pinned
    Scrapy version before upgrading.
    """

    name = 'tui99spider'
    allowed_domains = ['tuitui99.com']
    start_urls = ['http://www.tuitui99.com/']

    # Static snapshot of the site's city navigation bar. It is parsed instead
    # of the live homepage response so the city list stays stable even if the
    # homepage layout changes. Every href/title below is runtime data — do not
    # reformat or translate.
    CITY_NAV_HTML = '''<div class="clearfix">
<a class="" href="http://beijing.tuitui99.com/" title="北京房产网">北京</a>
<a href="http://guangzhou.tuitui99.com/" title="广州房产网" class="">广州</a>
<a href="http://shenzhen.tuitui99.com/" title="深圳房产网" class="">深圳</a>
<a href="http://chengdu.tuitui99.com/" title="成都房产网" class="">成都</a>
<a href="http://shanghai.tuitui99.com/" title="上海房产网" class="active">上海</a>
<a href="http://tianjin.tuitui99.com/" title="天津房产网" class="">天津</a>
<a href="http://chongqing.tuitui99.com/" title="重庆房产网" class="">重庆</a>
<a href="http://changchun.tuitui99.com/" title="长春房产网" class="">长春</a>
<a href="http://suzhou.tuitui99.com/" title="苏州房产网">苏州</a>
<a href="http://hangzhou.tuitui99.com/" title="杭州房产网">杭州</a>
<a href="http://dalian.tuitui99.com/" title="大连房产网" class="">大连</a>
<a href="http://zhengzhou.tuitui99.com/" title="郑州房产网" class="">郑州</a>
<a href="http://wuhan.tuitui99.com/" title="武汉房产网" class="">武汉</a>
<a href="http://dongguan.tuitui99.com/" title="东莞房产网">东莞</a>
<a href="http://zhuhai.tuitui99.com/" title="珠海房产网">珠海</a>
<a href="http://shenyang.tuitui99.com/" title="沈阳房产网" class="">沈阳</a>
<a href="http://shijiazhuang.tuitui99.com/" title="石家庄房产网" class="">石家庄</a>
<a href="http://xian.tuitui99.com/" title="西安房产网" class="">西安</a>
<a href="http://jinan.tuitui99.com/" title="济南房产网">济南</a>
<a href="http://kunming.tuitui99.com/" title="昆明房产网">昆明</a>
<a href="http://fuzhou.tuitui99.com/" title="福州房产网" class="">福州</a>
<a href="http://nanjing.tuitui99.com/" title="南京房产网" class="">南京</a>
<a href="http://changsha.tuitui99.com/" title="长沙房产网" class="">长沙</a>
<a href="http://huizhou.tuitui99.com/" title="惠州房产网">惠州</a>
<a href="http://hainan.tuitui99.com/" title="海南房产网">海南</a>
<a href="http://foshan.tuitui99.com/" title="佛山房产网" class="">佛山</a>
<a href="http://wuxi.tuitui99.com/" title="无锡房产网" class="">无锡</a>
<a href="http://guiyang.tuitui99.com/" title="贵阳房产网" class="">贵阳</a>
<a href="http://xiamen.tuitui99.com/" title="厦门房产网">厦门</a>
<a href="http://zhongshan.tuitui99.com/" title="中山房产网">中山</a>
<a href="http://yulin.tuitui99.com/" title="榆林房产网" class="">榆林</a>
<a href="http://hefei.tuitui99.com/" title="合肥房产网" class="">合肥</a>
<a href="http://haerbin.tuitui99.com/" title="哈尔滨房产网" class="">哈尔滨</a>
<a href="http://zibo.tuitui99.com/" title="淄博房产网">淄博</a>
<a href="http://qingdao.tuitui99.com/" title="青岛房产网">青岛</a>
<a href="http://weifang.tuitui99.com/" title="潍坊房产网" class="">潍坊</a>
<a href="http://baoding.tuitui99.com/" title="保定房产网" class="">保定</a>
<a href="http://taiyuan.tuitui99.com/" title="太原房产网" class="">太原</a>
<a href="http://langfang.tuitui99.com/" title="廊坊房产网" class="">廊坊</a>
<a href="http://weihai.tuitui99.com/" title="威海房产网">威海</a>
</div>'''

    def parse(self, response):
        """Yield one request per city's "Newhouse" index page.

        The city list comes from CITY_NAV_HTML (not ``response``); the meta
        dict seeds the site/city fields later copied into each item.
        """
        soup = bs4.BeautifulSoup(self.CITY_NAV_HTML, 'lxml')
        for link in soup.select('.clearfix > a'):
            href = link.get('href')
            if not href:
                # Defensive: skip anchors without an href attribute.
                continue
            city = link.get_text()
            city_url = href + 'Newhouse.html'
            # e.g. 'http://beijing.tuitui99.com/' -> 'beijing' (the subdomain)
            city_id = href.replace('http://', '').replace('.tuitui99.com/', '')
            self.logger.debug('%s %s %s', city, city_id, city_url)
            meta = {
                'website': '推推99', 'web_url': 'tuitui99.com',
                'city': city, 'city_id': city_id,
            }
            yield scrapy.Request(city_url, callback=self.parse_city_area, meta=meta)

    def parse_city_area(self, response):
        """Follow each district link on a city's new-house index page.

        Skips the '不限' ("no filter") pseudo-district and carries the
        district name forward in ``meta['area']``.
        """
        meta = response.meta
        city_id = meta['city_id']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # Guard against layout changes / error pages instead of IndexError.
        area_box = soup.find('dl', class_='clearfix')
        if area_box is None:
            self.logger.warning('no district box found on %s', response.url)
            return
        for link in area_box.select('.quyu > a'):
            area = link.get_text()
            if area == '不限':
                continue
            area_url = 'http://' + city_id + '.tuitui99.com' + link.get('href')
            meta['area'] = area
            yield scrapy.Request(area_url, callback=self.parse_city_estate, meta=meta)

    def parse_city_estate(self, response):
        """Emit one Tuitui99Item per estate on the page, then paginate.

        Pagination relies on the ``.page_main`` node exposing ``max`` and
        ``default`` (current page) attributes; malformed pages end the walk
        for this district instead of raising.
        """
        meta = response.meta
        city_id = meta['city_id']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for heading in soup.select('.new_info > h3'):
            anchor = heading.find('a')
            if anchor is None:
                continue
            href = anchor.get('href')
            estate_url = 'http://' + city_id + '.tuitui99.com' + href
            # e.g. '/123.html' -> '123'
            estate_id = href.replace('/', '').replace('.html', '')
            detail = heading.find_next_sibling('div')
            names = detail.select('.where > span') if detail is not None else []
            if not names:
                # Listing without a name node — skip rather than crash.
                continue
            item = Tuitui99Item()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = city_id
            item['area'] = meta['area']
            item['estate'] = names[0].get_text()
            item['estate_id'] = estate_id
            item['estate_url'] = estate_url
            yield item

        pager = soup.select_one('.page_main')
        if pager is None:
            return
        try:
            max_page = int(pager.get('max'))
            current_page = int(pager.get('default'))
        except (TypeError, ValueError):
            # Missing or non-numeric paging attributes: stop paginating here.
            self.logger.warning('unreadable pager on %s', response.url)
            return
        if current_page < max_page:
            last_seg = response.url.split('/')[-1]
            next_seg = 'p' + str(current_page + 1) + '.html'
            if 'p' in last_seg:
                # Already on a '.../pN.html' page: swap in the next page.
                next_url = response.url.replace(last_seg, next_seg)
            else:
                # First page ('.../foo.html'): append '/pN.html' below it.
                next_url = response.url.replace(
                    last_seg, last_seg.replace('.html', '') + '/' + next_seg)
            yield scrapy.Request(next_url, callback=self.parse_city_estate, meta=meta)