# coding:utf-8

from scrapy.contrib.spiders import CrawlSpider
from ..items import FccsItem
import bs4
import scrapy


class FccsSpider(CrawlSpider):
    """Spider for fccs.com (房产超市 / "Real Estate Supermarket").

    Crawls the front page for the list of city portals, then each city's
    new-house listing pages for estate (楼盘) and area (区域) data,
    following pagination until the last page.
    """

    name = 'fccsspider'
    allowed_domains = ['fccs.com']
    start_urls = ['http://www.fccs.com/']

    def parse(self, response):
        """Parse the front page and yield one request per city link.

        City anchors carry class ``hong``; their href looks like
        ``http://{city_id}.fccs.com/newhouse/`` — the subdomain doubles
        as the city id. The shared city metadata is passed to
        :meth:`parse_city_estate` via ``meta``.
        """
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for child in soup.find_all('a', class_='hong'):
            # Anchor text is the city name with a trailing '新房'
            # ("new house") suffix that we strip off.
            city = child.get_text().strip().replace('新房', '')
            city_url = child.get('href')
            # Extract the subdomain as the city id, e.g.
            # 'http://hz.fccs.com/newhouse/' -> 'hz'.
            city_id = city_url.replace('http://', '').replace('.fccs.com/newhouse/', '')
            citys = {
                'website': '房产超市', 'web_url': 'fccs.com',
                'city': city, 'city_id': city_id, 'city_url': city_url
            }
            yield scrapy.Request(city_url, callback=self.parse_city_estate, meta=citys)

    def parse_city_estate(self, response):
        """Parse one city listing page: yield a FccsItem per estate,
        then follow the "next page" link if one exists.
        """
        meta = response.meta
        city_url = meta['city_url']
        city_id = meta['city_id']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # Each <div class="i1"> holds one estate entry; the immediately
        # following sibling <div> holds its area as '[区域]…'.
        for child in soup.find_all('div', class_='i1'):
            estate = ''
            estate_id = ''
            estate_url = ''
            if child.get_text().strip() != '':
                links = child.find_all('a')
                if links:
                    # Fix: the original re-ran child.find_all('a')[0]
                    # three times; reuse the first anchor instead.
                    first = links[0]
                    estate = first.get_text()
                    estate_url = first.get('href')
                    # Estate id is the path segment between the city URL
                    # prefix and the trailing '/index.shtml'.
                    estate_id = estate_url.replace(city_url, '').replace('/index.shtml', '')

            areabox = child.find_next_sibling('div')
            if areabox is None:
                # Fix: malformed markup without the area sibling used to
                # raise AttributeError and abort the whole page.
                continue
            area = areabox.get_text().split(']')[0].replace('[', '')
            if estate != '':
                item = FccsItem()
                item['website'] = meta['website']
                item['web_url'] = meta['web_url']
                item['city'] = meta['city']
                item['city_id'] = city_id
                item['area'] = area
                item['estate'] = estate
                item['estate_id'] = estate_id
                item['estate_url'] = estate_url
                yield item

        # Pagination: when another page exists, the last anchor under
        # .pages-nav reads '下一页»' ("next page") and its href is
        # site-relative, so rebuild the absolute URL from the city
        # subdomain before requesting it.
        pages = soup.select('.pages-nav > a')
        if pages and pages[-1].get_text() == '下一页»':
            next_url = 'http://' + city_id + '.fccs.com' + pages[-1].get('href')
            yield scrapy.Request(next_url, callback=self.parse_city_estate, meta=meta)
