# coding: utf-8

import bs4
from scrapy.contrib.spiders.crawl import CrawlSpider, Rule
import scrapy
from ..items import QfangItem


class QFangSpider(CrawlSpider):
    """Spider for new-house ("新房") listings on qfang.com (Q房网).

    Crawl flow:
        1. ``parse`` — extract every city from a hard-coded snapshot of the
           site's city-index HTML and request each city's ``/newhouse/list``.
        2. ``parse_area_estate`` — follow each district ("area") link on the
           city's listing page.
        3. ``parse_city_estate`` — emit one :class:`QfangItem` per estate and
           follow "next page" pagination.

    NOTE(review): overriding ``parse`` disables CrawlSpider's rule machinery;
    since no ``rules`` are defined, this class effectively behaves like a
    plain ``scrapy.Spider``.
    """

    name = 'qfangspider'
    allowed_domains = ['qfang.com']
    start_urls = ['http://beijing.qfang.com/']

    # Static snapshot of the site's city-index markup.  ``parse`` extracts
    # cities from this literal instead of from the downloaded start page.
    # NOTE(review): the Foshan <li> is tagged ``learfix`` (sic) in this
    # snapshot, so the '.clearfix > p > a' selector skips it — confirm
    # whether that omission is intentional before "fixing" the data.
    CITY_INDEX_HTML = '''<ul class="cities-opts clearfix">
    <li class="clearfix">
      <em class="icons">B</em>
      <p>
        <a class="highlight" href="http://beijing.qfang.com">北京</a>
        <a href="http://baoshan.qfang.com">保山</a>

      </p>
    </li>
    <li class="clearfix">
      <em class="icons">N</em>
      <p>
        <a class="highlight" href="http://nanjing.qfang.com">南京</a>
        <a href="http://nanning.qfang.com">南宁</a>
        <a href="http://nantong.qfang.com">南通</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">C</em>
      <p>
        <a href="http://chengdu.qfang.com">成都</a>
        <a href="http://chongqing.qfang.com">重庆</a>
        <a href="http://changsha.qfang.com">长沙</a>
        <a href="http://chuxiong.qfang.com">楚雄</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">P</em>
      <p>
        <a href="http://pingdingshan.qfang.com">平顶山</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">D</em>
      <p>
        <a href="http://dongguan.qfang.com">东莞</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">Q</em>
      <p>
        <a class="highlight" href="http://qingdao.qfang.com">青岛 </a>
      </p>
    </li>
    <li class="learfix">
      <em class="icons">F</em>
      <p>
        <a class="highlight" href="http://foshan.qfang.com">佛山</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">S</em>
      <p>
        <a class="highlight" href="http://shenzhen.qfang.com">深圳</a>
        <a class="highlight" href="http://shanghai.qfang.com">上海</a>
        <a class="highlight" href="http://suzhou.qfang.com">苏州</a>
        <a href="http://sanya.qfang.com">三亚</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">G</em>
      <p>
        <a class="highlight" href="http://guangzhou.qfang.com">广州</a>
      	<a href="http://ganzhou.qfang.com/">赣州</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">T</em>
      <p>
        <a href="http://taiyuan.qfang.com">太原 </a>
        <a href="http://taicang.qfang.com">太仓 </a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">H</em>
      <p>
        <a class="highlight" href="http://hangzhou.qfang.com">杭州</a>
        <a href="http://huizhou.qfang.com">惠州</a>
        <a href="http://honghe.qfang.com">红河</a>
        <a href="http://hefei.qfang.com">合肥</a>
        <a href="http://huzhou.qfang.com">湖州</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">W</em>
      <p>
        <a href="http://wuhan.qfang.com">武汉</a>
        <a href="http://wuxi.qfang.com">无锡</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">J</em>
      <p>
        <a href="http://jiaxing.qfang.com">嘉兴</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">X</em>
      <p>
        <a class="highlight" href="http://hk.qfang.com">香港</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">K</em>
      <p>
        <a href="http://kunming.qfang.com">昆明</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">Z</em>
      <p>
        <a class="highlight" href="http://zh.qfang.com">珠海</a>
        <a class="highlight" href="http://zhongshan.qfang.com">中山</a>
        <a href="http://zibo.qfang.com">淄博</a>
      </p>
    </li>
    <li class="clearfix">
      <em class="icons">L</em>
      <p>
        <a href="http://langfang.qfang.com">廊坊</a>
      </p>
    </li>
  </ul>'''

    def parse(self, response):
        """Yield one request per city, targeting its new-house listing page.

        The downloaded ``response`` is intentionally ignored; the city list
        comes from :attr:`CITY_INDEX_HTML`.
        """
        soup = bs4.BeautifulSoup(self.CITY_INDEX_HTML, 'lxml')

        for link in soup.select('.clearfix > p > a'):
            city = link.get_text()
            # FIX: strip a trailing slash (e.g. http://ganzhou.qfang.com/)
            # so city_id is clean and later path joins don't produce '//'.
            city_url = link.get('href').rstrip('/')
            city_id = city_url.replace('http://', '').replace('.qfang.com', '')
            self.logger.info('city %s (%s) -> %s', city, city_id, city_url)
            meta = {
                'website': 'Q房网', 'web_url': 'qfang.com',
                'city': city, 'city_id': city_id, 'city_url': city_url,
            }
            yield scrapy.Request(city_url + '/newhouse/list',
                                 callback=self.parse_area_estate, meta=meta)

    def parse_area_estate(self, response):
        """Follow every district ("area") link on a city's listing page."""
        meta = response.meta
        city_url = meta['city_url']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for link in soup.select('.search-area-detail > li > a'):
            area = link.get_text()
            if area == '不限':
                # Skip the "no filter" pseudo-area.
                continue
            area_url = city_url + link.get('href')
            self.logger.info('area %s -> %s', area, area_url)
            # FIX: build a fresh meta dict per request instead of mutating
            # the single shared dict from the parent response.
            yield scrapy.Request(area_url, callback=self.parse_city_estate,
                                 meta=dict(meta, area=area))

    def parse_city_estate(self, response):
        """Emit one QfangItem per estate on the page, then follow pagination."""
        meta = response.meta
        city_url = meta['city_url']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for title in soup.select('.house-title'):
            estate = title.a.get_text().strip()
            href = title.a.get('href')
            estate_url = city_url + href
            # Estate id is the href with its path prefix and the tracking
            # query parameter removed.
            estate_id = href.replace('/newhouse/', '').replace('?insource=new_list', '')
            item = QfangItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = meta['area']
            item['estate'] = estate
            item['estate_id'] = estate_id
            item['estate_url'] = estate_url
            self.logger.info('estate %s (%s) -> %s', estate, estate_id, estate_url)
            yield item

        # Pagination: follow the first pager anchor when it is the active
        # "next page" link.  FIX: selecting the anchors directly avoids the
        # IndexError the old code risked when '.btns_turnpage' existed with
        # no <a> children.
        pager_links = soup.select('.btns_turnpage > a')
        if pager_links:
            next_link = pager_links[0]
            if next_link.get_text().strip() == '下一页':
                href = next_link.get('href')
                # FIX: the original compared the *concatenated* URL against
                # 'javascript:;', which could never match, so dead
                # javascript placeholders were still requested.  Test the
                # raw href before joining.
                if href and href != 'javascript:;':
                    yield scrapy.Request(city_url + href,
                                         callback=self.parse_city_estate,
                                         meta=meta)

