# coding:utf-8

import scrapy
import bs4
from scrapy.contrib.spiders import CrawlSpider
from ..items import GoufangItem


class GoufangSpider(CrawlSpider):
    """Scrape new-property (楼盘) listings from goufang.com (购房网).

    Pipeline:
      parse()             -> one request per city sub-site
      parse_city_area()   -> one request per district within a city
      parse_city_estate() -> one GoufangItem per listing, plus pagination
    """

    name = 'goufangspider'
    allowed_domains = ['goufang.com']
    start_urls = ['http://goufang.com/']

    # Listings shown per result page on the site.
    PAGE_SIZE = 20

    # City sub-sites skipped by parse(); presumably their markup differs
    # from the common layout — TODO confirm.
    SKIPPED_CITIES = ('青岛', '广州', '深圳')

    def parse(self, response):
        """Yield one request per city house site (http://house.<id>.goufang.com/).

        The city list is a hard-coded copy of the portal's city <ul>;
        ``response`` itself is not parsed.
        """
        data = '''      <ul>
        <li><a href="http://dl.goufang.com/"  class="dalian">
          大连站
          </a></li>
        <li><a href="http://cc.goufang.com/"  class="changchun">
          长春站
          </a></li>
           <li><a href="http://sh.goufang.com/"  class="shanghai">
          上海站
          </a></li>
        <li><a href="http://hz.goufang.com/"  class="hangzhou">
          杭州站
          </a></li>
        <li><a href="http://sy.goufang.com/"  class="shenyang">
          沈阳站
          </a></li>
        <li><a href="http://qd.goufang.com/"  class="qingdao">
          青岛站
          </a></li>
        <li><a href="http://bj.goufang.com/"  class="beijing">
          北京站
          </a></li>
        <li><a href="http://nj.goufang.com/"  class="nanjing">
          南京站
          </a></li>
        <li><a href="http://cd.goufang.com/"  class="chengdu">
          成都站
          </a></li>
        <li><a href="http://wh.goufang.com/"  class="wuhan">
          武汉站
          </a></li>
        <li><a href="http://tj.goufang.com/"  class="tianjin">
          天津站
          </a></li>
        <li><a href="http://gz.goufang.com/">广州站</a></li>
        <li><a href="http://sz.goufang.com/">深圳站</a></li>

      </ul>'''
        soup = bs4.BeautifulSoup(data, 'lxml')

        for anchor in soup.select('li > a'):
            # '大连站' -> '大连'
            city = anchor.get_text().replace('站', '').strip()
            city_url = anchor.get('href')
            # 'http://dl.goufang.com/' -> 'dl'
            city_id = city_url.replace('http://', '').replace('.goufang.com/', '')
            site_url = 'http://house.' + city_id + '.goufang.com/'
            print(city, city_id)
            meta = {
                'website': '购房者', 'web_url': 'goufang.com',
                'city': city, 'city_id': city_id, 'city_url': city_url
            }
            if city not in self.SKIPPED_CITIES:
                yield scrapy.Request(site_url, callback=self.parse_city_area, meta=meta)

    def parse_city_area(self, response):
        """Yield one request per district listed on a city's house page."""
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # First category nav <ul> holds the district links.
        area_nav = soup.find_all('ul', class_='nav-category-content')[0]
        for anchor in area_nav.select('li > a'):
            area = anchor.get_text()
            if area == '不限':  # "no filter" pseudo-district
                continue
            area_url = anchor.get('href')
            print(area, area_url)
            # scrapy.Request copies meta, so a per-iteration dict keeps each
            # district's 'area' independent of its siblings.
            meta = dict(response.meta, area=area)
            yield scrapy.Request(area_url, callback=self.parse_city_estate, meta=meta)

    def parse_city_estate(self, response):
        """Yield a GoufangItem per listing on this result page, then follow
        the next page while one remains."""
        meta = response.meta
        city_id = meta['city_id']
        area = meta['area']

        # District label carries its listing count, e.g. '西岗(57)' -> 57.
        # NOTE(review): assumes an ASCII '(' in the label — confirm.
        total = int(area.split('(')[-1].replace(')', ''))
        # Pages needed at PAGE_SIZE listings per page. Ceiling division:
        # the original branchy version returned a float on Python 3 when
        # total divided evenly.
        page_num = -(-total // self.PAGE_SIZE)

        current_url = response.url
        # URLs look like http://house.<id>.goufang.com/<area>/p/<n>/ ;
        # path segment 5 is the current page number.
        current_num = response.url.split('/')[5]
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        estate_prefix = 'http://house.' + city_id + '.goufang.com/'
        for heading in soup.find_all('h3', class_='xp_lm'):
            link = heading.find_all('a')[0]
            estate_url = link.get('href')

            item = GoufangItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = city_id
            # Strip the '(count)' suffix from the district label.
            item['area'] = area.split('(')[0]
            item['estate'] = link.get_text()
            # '<prefix><id>.html' -> '<id>'
            item['estate_id'] = estate_url.replace(estate_prefix, '').replace('.html', '')
            item['estate_url'] = estate_url
            yield item

        # Follow the next result page until the computed last page.
        if int(current_num) < page_num:
            next_url = current_url.replace(
                '/p/' + str(current_num) + '/', '/p/' + str(int(current_num) + 1) + '/')
            yield scrapy.Request(next_url, callback=self.parse_city_estate, meta=meta)
