import bs4
from scrapy.contrib.spiders.crawl import CrawlSpider
import scrapy
from ..items import LanfangwItem


class LanfangwSpider(CrawlSpider):
    """Spider for lanfw.com (蓝房网 / "Lanfang" real-estate portal).

    Crawl flow: hard-coded city list -> per-city search page -> per-area
    listing pages -> one item per estate, following pagination.

    NOTE(review): this class overrides ``parse`` on a ``CrawlSpider``
    subclass, which disables CrawlSpider's rule machinery; since no
    ``rules`` are defined this is harmless, but plain ``scrapy.Spider``
    would be the conventional base. Also, ``scrapy.contrib`` (see the
    import at the top of the file) is a deprecated module path removed
    in modern Scrapy — confirm the pinned Scrapy version.
    """

    name = 'lanfangwspider'
    allowed_domains = ['lanfw.com']
    start_urls = ['http://bj.lanfw.com/']

    def parse(self, response):
        """Emit one search-page request per city.

        The city list is a hard-coded HTML snippet (the site's city
        navigation links) rather than being scraped from ``response``,
        so ``response`` is intentionally unused here.
        """
        data = '''<a target="_blank" href="http://house.lanfw.com/fz">福州</a><a target="_blank" href="http://house.lanfw.com/xm">厦门</a><a target="_blank" href="http://house.lanfw.com/qz">泉州</a><a target="_blank" href="http://house.lanfw.com/zz">漳州</a><a target="_blank" href="http://house.lanfw.com/ly">龙岩</a>
<a target="_blank" href="http://house.lanfw.com/pingtan">平潭</a><a target="_blank" href="http://house.lanfw.com/sm">三明</a><a target="_blank" href="http://house.lanfw.com/pt">莆田</a><a target="_blank" href="http://house.lanfw.com/np">南平</a>
<a target="_blank" href="http://house.lanfw.com/bj">北京</a><a target="_blank" href="http://house.lanfw.com/sh">上海</a><a target="_blank" href="http://house.lanfw.com/cz">常州</a><a target="_blank" href="http://house.lanfw.com/yc">盐城</a>'''
        soup = bs4.BeautifulSoup(data, 'lxml')

        for link in soup.find_all('a'):
            href = link.get('href')
            # e.g. 'http://house.lanfw.com/fz' -> city_id 'fz'
            yield scrapy.Request(
                href + '/search/',
                callback=self.parse_city_area,
                meta={
                    'website': '蓝房网', 'web_url': 'lanfw.com',
                    'city': link.get_text(), 'city_id': href.split('/')[-1],
                },
            )

    def parse_city_area(self, response):
        """Emit one listing-page request per area (district) of a city."""
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # Guard against a missing '.address' box (the original indexed
        # select(...)[0], which raises IndexError on layout changes).
        areabox = soup.select_one('.address')
        if areabox is None:
            return

        for link in areabox.find_all('a'):
            area = link.get_text()
            if area == '全部':  # skip the "all areas" pseudo-link
                continue
            # Copy meta per request instead of mutating response.meta in
            # place, so each scheduled request owns its own 'area' value.
            meta = dict(response.meta, area=area)
            yield scrapy.Request(link.get('href'),
                                 callback=self.parse_city_estate, meta=meta)

    def parse_city_estate(self, response):
        """Yield one LanfangwItem per estate, then follow pagination."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for link in soup.select('.title > h2 > a'):
            estate_url = link.get('href')
            item = LanfangwItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = meta['area']
            item['estate'] = link.get_text()
            item['estate_id'] = estate_url.split('/')[-1]
            item['estate_url'] = estate_url
            yield item

        # Pagination: the last pager link is '下一页' ("next page") when
        # there are more results. (Renamed the original local `next`,
        # which shadowed the builtin, and reuse the single select result.)
        pages = soup.select('.pages > ul > li > a')
        if pages:
            last_link = pages[-1]
            if last_link.get_text() == '下一页':
                yield scrapy.Request(last_link.get('href'),
                                     callback=self.parse_city_estate, meta=meta)
