# coding: utf-8

import codecs
from urllib.parse import parse_qs, urlparse

import bs4
import scrapy
from scrapy.contrib.spiders.crawl import CrawlSpider

from ..items import QqItem


class QQ2Spider(CrawlSpider):
    """Spider for Tencent real estate (house.qq.com).

    Crawl flow:
      1. ``parse``           -- scrape the city list from the portal home page.
      2. ``parse_city_area`` -- page through a city's AJAX search endpoint
                                (db.house.qq.com), yielding one request per
                                estate plus a request for the next result page.
      3. ``parse_get_area``  -- read an estate's detail page and emit a
                                ``QqItem`` carrying the estate's district.
    """

    name = 'qqspider'
    allowed_domains = ['house.qq.com']
    start_urls = ['http://house.qq.com/']

    # Cities whose search endpoint is known to be broken on the site.
    SKIPPED_CITIES = ('齐齐哈尔', '松源', '汕尾')

    @staticmethod
    def _search_url(city_id, page_no=1):
        """Build the AJAX search URL for *city_id*.

        The site expects the city id to appear twice in the query string;
        *page_no* selects the result page (1-based).
        """
        return ('http://db.house.qq.com/index.php?mod=search&act=newsearch'
                '&city=' + city_id + '&showtype=1&unit=1&all=&page_no='
                + str(page_no) + '&CA=&mod=search&city=' + city_id)

    def parse(self, response):
        """Extract the city list from the home page and queue city searches."""
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        boxes = soup.select('.scrollContent')
        if not boxes:
            # Layout changed; nothing to crawl rather than an IndexError.
            return
        for link in boxes[0].find_all('a'):
            city = link.get_text()
            # City id is the subdomain: http://bj.house.qq.com/ -> 'bj'.
            city_id = (link.get('href')
                       .replace('http://', '')
                       .replace('.house.qq.com', '')
                       .replace('/', ''))
            if city == '黄石':
                # 黄石's link does not follow the subdomain convention.
                city_id = 'huangshi'
            if city in self.SKIPPED_CITIES:
                continue
            yield scrapy.Request(
                self._search_url(city_id),
                callback=self.parse_city_area,
                meta={
                    'website': '腾讯', 'web_url': 'house.qq.com',
                    'city': city, 'city_id': city_id,
                },
            )

    def parse_city_area(self, response):
        """Parse one AJAX result page: yield estate requests and paginate."""
        meta = response.meta
        city_id = meta['city_id']

        # The endpoint returns a JS snippet whose HTML payload is wrapped in a
        # string literal with unicode/backslash escapes; decode in two passes
        # to recover the raw markup.
        result = response.body.decode('raw-unicode-escape')
        html = codecs.escape_decode(bytes(result, 'utf-8'))[0].decode('utf-8')
        if not html:
            return

        # Strip the trailing page-count statement and the JS string wrapper,
        # then undo the entity/slash escaping used inside the literal.
        pagenums = html.split(';')[-2].strip()
        estatelist = (html.replace(pagenums, '')
                      .replace('var search_result = "', '')
                      .strip()
                      .replace('"', '')
                      .replace(';', '')
                      .replace('&lt;', '<')
                      .replace('&gt;', '>')
                      .replace('\\/', '/'))
        soup = bs4.BeautifulSoup(estatelist, 'html5lib')

        for link in soup.select('.title > h2 > a'):
            estate_url = link.get('href')
            # BUG FIX: the original mutated response.meta in place and passed
            # the same dict to every request, so all callbacks for this page
            # saw the *last* estate's data.  Build a fresh dict per request.
            estate_meta = dict(meta)
            estate_meta['estate'] = link.get_text()
            # href ends with '/', so the id is the second-to-last segment.
            estate_meta['estate_id'] = estate_url.split('/')[-2]
            estate_meta['estate_url'] = estate_url
            yield scrapy.Request(estate_url + '/info.html',
                                 callback=self.parse_get_area,
                                 meta=estate_meta)

        # Pagination: the pager's last link reads '下一页' ("next page") while
        # more pages remain.  Recover the current page number from the request
        # URL's query string instead of fragile string surgery.
        pages = soup.select('.page > a')
        if pages and pages[-1].get_text().replace('>', '').strip() == '下一页':
            if pages[-1].get('href'):
                query = parse_qs(urlparse(response.url).query)
                current_num = int(query.get('page_no', ['1'])[0])
                yield scrapy.Request(
                    self._search_url(city_id, current_num + 1),
                    callback=self.parse_city_area,
                    meta=dict(meta))

    def parse_get_area(self, response):
        """Extract the estate's district from its detail page, emit a QqItem."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'html5lib')

        # The detail page lists labelled <span>s; the one labelled '所属区县'
        # ("district") is followed by a <p> holding the value.
        for span in soup.select('.bd > ul > li > span'):
            if span.get_text().strip() != '所属区县':
                continue
            item = QqItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = span.find_next_sibling('p').get_text()
            item['estate'] = meta['estate']
            item['estate_id'] = meta['estate_id']
            item['estate_url'] = meta['estate_url']
            yield item
