# coding: utf-8

import bs4
from scrapy.contrib.spiders.crawl import CrawlSpider
import scrapy
from ..items import LoupanItem


class LoupanSpider(CrawlSpider):
    """Spider for loupan.com (楼盘网).

    Crawls the all-cities index page, then each city's new-home
    ("xinfang") listing pages, following pagination, and finally visits
    every estate's detail page to extract a secondary estate id before
    emitting a :class:`LoupanItem`.
    """
    # NOTE(review): no `rules` are defined and `parse` is overridden,
    # which the Scrapy docs warn against for CrawlSpider subclasses;
    # plain `scrapy.Spider` looks like the intended base class — confirm
    # before changing, as other code may isinstance-check CrawlSpider.

    name = 'loupanspider'
    allowed_domains = ['loupan.com']
    start_urls = ['http://www.loupan.com/?all']

    def parse(self, response):
        """Parse the city index and schedule one listing request per city.

        :param response: the ``http://www.loupan.com/?all`` index page.
        :yields: one ``scrapy.Request`` per city, targeting the city's
            ``/xinfang/`` listing and carrying city info in ``meta``.
        """
        soup = bs4.BeautifulSoup(response.body, 'lxml')
        # The second `.city_deatil` box on the page holds the full city
        # list (the spelling "deatil" is the site's own CSS class name).
        citybox = soup.select('.city_deatil')[1]

        for anchor in citybox.select('.clearfix > dd > a', limit=475):
            city_url = anchor.get('href')
            # e.g. 'http://sz.loupan.com/' -> 'sz'
            city_id = city_url.replace('http://', '').replace('.loupan.com/', '')
            # Build a fresh meta dict per city (the original rebound the
            # iterated name `citys` here, shadowing the result list).
            city_meta = {
                'website': '楼盘网', 'web_url': 'loupan.com',
                'city': anchor.get_text().strip(),
                'city_id': city_id, 'city_url': city_url,
            }
            yield scrapy.Request(city_url + 'xinfang/',
                                 callback=self.parse_city_estate,
                                 meta=city_meta)

    def parse_city_estate(self, response):
        """Parse one page of a city's new-home listing.

        Yields a detail-page request per estate found, plus a request
        for the next listing page when a pagination link is present.

        :param response: a city ``/xinfang/`` listing page whose
            ``meta`` carries the city fields set by :meth:`parse`.
        """
        meta = response.meta
        city_id = meta['city_id']
        soup = bs4.BeautifulSoup(response.body, 'lxml')
        base = 'http://' + city_id + '.loupan.com'

        #
        # Estate entries: name, id, detail URL and district.
        #
        for heading in soup.select('.info > h3'):
            href = heading.a.get('href')
            # Give every yielded request its own meta dict instead of
            # mutating the shared response.meta — the original relied on
            # Scrapy copying `meta` at Request construction time.
            estate_meta = dict(meta)
            estate_meta['estate'] = heading.a.get_text().split(' ')[0]
            estate_meta['estate_id'] = href.split('/')[-1]
            estate_meta['estate_url'] = base + href
            # The sibling <p class="add"> text looks like '[district-...]';
            # keep only the district name.
            address = heading.find_next_siblings('p', class_='add')[0]
            estate_meta['area'] = address.a.get_text().split('-')[0].replace('[', '')
            yield scrapy.Request(estate_meta['estate_url'],
                                 callback=self.parse_get_estateid,
                                 meta=estate_meta)

        #
        # Pagination: follow the "next page" link when present.
        #
        pages = soup.find_all('a', class_='pagenxt')
        if pages:
            next_url = base + pages[0].get('href')
            yield scrapy.Request(next_url,
                                 callback=self.parse_city_estate,
                                 meta=meta)

    def parse_get_estateid(self, response):
        """Parse an estate detail page and emit the populated item.

        Extracts the secondary estate id from the phone-page URL stored
        in the ``data-m_url`` attribute; pages without that element are
        silently skipped.
        """
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        shaomiao = soup.select('.saomiao > p')
        if not shaomiao:
            return
        phone_url = shaomiao[0].get('data-m_url')

        item = LoupanItem()
        # Carry every field gathered by the earlier callbacks over to
        # the item, then add the id parsed from this page.
        for key in ('website', 'web_url', 'city', 'city_id', 'area',
                    'estate', 'estate_id', 'estate_url'):
            item[key] = meta[key]
        # The secondary id is the last path segment of the phone URL.
        item['estate_id2'] = phone_url.split('/')[-1]
        yield item
