# coding: utf-8

import bs4
from scrapy.contrib.spiders.crawl import CrawlSpider, Rule
import scrapy
from ..items import LianjiaItem


class LianjiaSpider(CrawlSpider):
    """Spider for lianjia.com new-home ("loupan") listings.

    The start page's city switcher is embedded as a hard-coded HTML snippet
    (copied from the site's markup) instead of being parsed from the live
    response.  Cities hosted on ``*.fang.lianjia.com`` use one listing
    layout (``parse_city_area`` / ``parse_area_estate``); the resort cities
    on ``you.lianjia.com`` (Haikou, Qionghai, Sanya) use a different layout
    handled by ``parse_very_city``.

    NOTE(review): ``scrapy.contrib.spiders`` was deprecated and later
    removed from Scrapy — the import should migrate to ``scrapy.spiders``
    when the project's Scrapy version is bumped.  Overriding ``parse`` on a
    ``CrawlSpider`` is also discouraged (CrawlSpider uses it internally for
    its ``rules``); it is safe here only because no ``rules`` are defined.
    """

    name = 'lianjiaspider'
    allowed_domains = ['lianjia.com']
    start_urls = ['http://bj.fang.lianjia.com/']

    # Cities whose listing pages live on you.lianjia.com and therefore
    # need the alternate parser (parse_very_city).
    RESORT_CITIES = ('海口', '琼海', '三亚')

    def parse(self, response):
        """Yield one ``/loupan/`` request per city in the embedded index.

        The ``response`` argument is unused: the city list is taken from
        the hard-coded snippet below, not from the downloaded page.
        Each request carries the city metadata dict via ``meta`` (Scrapy's
        ``Request`` constructor copies it, so per-iteration reuse is safe).
        """
        data = '''<ul><li class="clear"><span class="code-title fl">B</span>
        <div class="city-enum fl"><a href="http://bj.fang.lianjia.com" title="北京房产网">北京</a></div></li>
        <li class="clear"><span class="code-title fl">C</span><div class="city-enum fl">
        <a href="http://cd.fang.lianjia.com" title="成都房产网">成都</a><a href="http://cq.fang.lianjia.com" title="重庆房产网">重庆</a>
        <a href="http://cs.fang.lianjia.com" title="长沙房产网">长沙</a></div></li>
        <li class="clear"><span class="code-title fl">D</span>
        <div class="city-enum fl"><a href="http://dl.fang.lianjia.com" title="大连房产网">大连</a></div></li>
        <li class="clear"><span class="code-title fl">G</span>
        <div class="city-enum fl"><a href="http://gz.fang.lianjia.com" title="广州房产网">广州</a></div></li>
        <li class="clear"><span class="code-title fl">H</span><div class="city-enum fl"><a href="http://hz.fang.lianjia.com" title="杭州房产网">杭州</a>
        <a href="http://you.lianjia.com/hk" title="海口房产网">海口</a></div></li>
        <li class="clear"><span class="code-title fl">J</span><div class="city-enum fl">
        <a href="http://jn.fang.lianjia.com" title="济南房产网">济南</a></div></li><li class="clear"><span class="code-title fl">L</span>
        <div class="city-enum fl"><a href="http://you.lianjia.com/ls" title="陵水房产网">陵水</a></div></li>
        <li class="clear"><span class="code-title fl">N</span><div class="city-enum fl">
        <a href="http://nj.fang.lianjia.com" title="南京房产网">南京</a></div></li><li class="clear"><span class="code-title fl">Q</span>
        <div class="city-enum fl"><a href="http://qd.fang.lianjia.com" title="青岛房产网">青岛</a>
        <a href="http://you.lianjia.com/qh" title="琼海房产网">琼海</a></div></li><li class="clear">
        <span class="code-title fl">S</span><div class="city-enum fl"><a href="http://sh.fang.lianjia.com" title="上海房产网">上海</a>
        <a href="http://sz.fang.lianjia.com" title="深圳房产网">深圳</a><a href="http://su.fang.lianjia.com" title="苏州房产网">苏州</a>
        <a href="http://sjz.fang.lianjia.com" title="石家庄房产网">石家庄</a><a href="http://you.lianjia.com/san" title="三亚房产网">三亚</a>
        <a href="http://sy.fang.lianjia.com" title="沈阳房产网">沈阳</a></div></li><li class="clear"><span class="code-title fl">T</span>
        <div class="city-enum fl"><a href="http://tj.fang.lianjia.com" title="天津房产网">天津</a></div></li>
        <li class="clear"><span class="code-title fl">W</span><div class="city-enum fl"><a href="http://wh.fang.lianjia.com" title="武汉房产网">武汉</a>
        <a href="http://you.lianjia.com/wc" title="文昌房产网">文昌</a><a href="http://you.lianjia.com/wn" title="万宁房产网">万宁</a></div></li>
        <li class="clear"><span class="code-title fl">X</span><div class="city-enum fl"><a href="http://xm.fang.lianjia.com" title="厦门房产网">厦门</a>
        <a href="http://xa.fang.lianjia.com" title="西安房产网">西安</a></div></li><li class="clear"><span class="code-title fl">Y</span>
        <div class="city-enum fl"><a href="http://yt.fang.lianjia.com" title="烟台房产网">烟台</a></div></li></ul>'''
        soup = bs4.BeautifulSoup(data, 'lxml')

        # Walk the city index: one <a> per city inside each .city-enum div.
        for child in soup.select('.city-enum > a '):
            city = child.get_text()
            city_url = child.get('href')          # hoisted: was fetched 4x per city
            city_url2 = city_url + '/loupan/'
            # Default id: the subdomain, e.g. "bj" from bj.fang.lianjia.com.
            city_id = city_url.replace('http://', '').replace('.fang.lianjia.com', '')
            citys = {
                'website': '链家', 'web_url': 'lianjia.com',
                'city': city, 'city_id': city_id, 'city_url': city_url
            }
            if city in self.RESORT_CITIES:
                # Resort cities live at you.lianjia.com/<id>, so the id is
                # the trailing path segment instead of a subdomain.
                citys['city_id'] = city_url.replace('http://you.lianjia.com/', '')
                yield scrapy.Request(city_url2, callback=self.parse_very_city, meta=citys)
            else:
                yield scrapy.Request(city_url2, callback=self.parse_city_area, meta=citys)

    def parse_very_city(self, response):
        """Parse a you.lianjia.com listing page: yield items, then follow
        the next page if the pager shows one.
        """
        meta = response.meta
        city_url = meta['city_url']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # Each .lp_m block is one estate card; its .bt paragraph holds the
        # link with the estate name and relative detail URL.
        for child in soup.select('.lp_m'):
            estates = child.find_all('p', class_='bt')[0]
            estate_url = 'http://you.lianjia.com' + estates.a.get('href')
            item = LianjiaItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = ''  # resort-city layout has no district breakdown
            item['estate'] = estates.a.get_text()
            # e.g. ".../12345.html" -> "12345"
            item['estate_id'] = estate_url.split('/')[-1].replace('.html', '')
            item['estate_url'] = estate_url
            yield item

        # BUGFIX: guard before indexing — the original did
        # select('.page_box')[0] first, raising IndexError on single-page
        # cities that render no pager at all.
        pages = soup.select('.page_box')
        if pages:
            now_num = int(pages[0].find_all('a', class_='on')[0].get_text())
            # The last <a> is "next page"; the one before it is the highest
            # visible page number.
            next_num = int(pages[0].find_all('a')[-2].get_text())
            if now_num < next_num:
                site_url = city_url + '/loupan/pg' + str(now_num + 1) + '/'
                yield scrapy.Request(site_url, callback=self.parse_very_city, meta=meta)


    def parse_city_area(self, response):
        """Parse a city's /loupan/ page and fan out one request per district
        (skipping the catch-all "不限" / "no limit" link).
        """
        meta = response.meta
        city_url = meta['city_url']
        soup = bs4.BeautifulSoup(response.body, 'lxml')
        flag = soup.find_all('div', class_='option-list')
        if flag:
            # The first option-list is the district filter.
            for child in flag[0].select('a'):
                area = child.get_text()
                href = child.get('href')
                # Reusing one meta dict per iteration is safe: scrapy's
                # Request constructor copies meta.
                meta['area'] = area
                meta['area_url'] = href
                if area != '不限':
                    yield scrapy.Request(city_url + href,
                                         callback=self.parse_area_estate, meta=meta)

    def parse_area_estate(self, response):
        """Parse one district listing page: yield an item per estate, then
        follow pagination encoded in the pager's ``page-data`` attribute.
        """
        meta = response.meta
        city = meta['city']
        area = meta['area']
        city_url = meta['city_url']
        area_url = meta['area_url']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for child in soup.select('.col-1 > h2 > a'):
            href = child.get('href')
            item = LianjiaItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = city
            item['city_id'] = meta['city_id']
            item['area'] = area
            item['estate'] = child.get_text()
            # e.g. "/loupan/xxx/" -> "xxx"
            item['estate_id'] = href.replace('/loupan/', '').replace('/', '')
            item['estate_url'] = city_url + href
            yield item

        pages = soup.select('.page-box')
        if pages:
            # ``page-data`` looks like {totalPage:N,curPage:M}; it is not
            # valid JSON (unquoted keys), hence the string surgery.
            # BUGFIX: hoist the attribute lookup (was computed twice) and
            # guard against it being absent, which made .split crash on
            # None in the original.
            page_data = pages[0].get('page-data')
            if page_data:
                parts = page_data.split(',')
                next_page = parts[0].split(':')[1]
                now_page = parts[1].split(':')[1].replace('}', '')
                if next_page:
                    page_num = int(next_page.strip())
                    now_num = int(now_page.strip())
                    if now_num < page_num:
                        next_url = city_url + area_url + 'pg' + str(now_num + 1) + '/'
                        yield scrapy.Request(next_url, callback=self.parse_area_estate, meta=meta)