# coding:utf-8

from scrapy.contrib.spiders import CrawlSpider, Rule
from ..items import SouhuItem
import scrapy
import bs4


# Scrape directly from the full ("all") listing — gives more complete coverage.

class Souhu2Spider(CrawlSpider):
    """Spider for focus.cn (Sohu Focus real-estate portal).

    Crawls the city index on house.focus.cn, then each city's estate
    search pages, yielding one ``SouhuItem`` per estate and following
    pagination links until the last page.
    """

    name = 'souhuspider'
    allowed_domains = ['focus.cn']
    start_urls = ['http://house.focus.cn/']

    # Listing pages show at most this many estates per page.
    PAGE_SIZE = 20

    def parse(self, response):
        """Extract every city from the index page and request its listings."""
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # One <a> per city; href looks like 'http://bj.focus.cn/'.
        for link in soup.select('.cityAreaBoxCen > ul > li > a'):
            city = link.get_text()
            city_url = link.get('href')
            # The host part of the href doubles as the city id
            # ('bj.focus.cn' -> 'bj').
            city_id = city_url.split('/')[2].replace('.focus.cn', '')
            site_url = city_url + 'search/index.html'
            print(city, city_id)
            citys = {
                'website': '搜狐', 'web_url': 'www.focus.cn',
                'city': city, 'city_id': city_id, 'city_url': city_url
            }
            yield scrapy.Request(site_url, callback=self.parse_area_estate, meta=citys)

    def _make_item(self, meta, title_node):
        """Build a SouhuItem from one estate title node.

        ``title_node`` is expected to wrap an <a> whose href ends in
        '<estate_id>.html' and to have a following <p> sibling whose
        first space-separated token is the area name.
        """
        estate = title_node.a.get_text()
        estate_url = title_node.a.get('href')
        estate_id = estate_url.split('/')[-1].replace('.html', '')
        area = title_node.find_next_sibling('p').get_text().strip().split(' ')[0]
        print(meta['city'], area, estate, estate_id, estate_url)
        item = SouhuItem()
        item['website'] = meta['website']
        item['web_url'] = meta['web_url']
        item['city'] = meta['city']
        item['city_id'] = meta['city_id']
        item['area'] = area
        item['estate'] = estate
        item['estate_id'] = estate_id
        item['estate_url'] = estate_url
        return item

    def parse_area_estate(self, response):
        """Parse one estate-listing page: yield its items, then paginate."""
        meta = response.meta
        current_url = response.url
        soup = bs4.BeautifulSoup(response.body, 'html.parser')

        if not soup.select('.pos-title'):
            # Not a listing page; retry via the city's /loupan/ index.
            yield scrapy.Request(meta['city_url'] + 'loupan/',
                                 callback=self.parse_area_estate, meta=meta)
            return

        # Two page layouts exist; prefer the '.lp-t-title' variant and
        # fall back to '.title' (both share the same node structure).
        for title_node in soup.select('.lp-t-title') or soup.select('.title'):
            yield self._make_item(meta, title_node)

        #
        # Pagination
        #
        if soup.select('.no-result'):
            return
        total = soup.select('.s-m-fr')
        # BUGFIX: the original tested `totle != ''`, which is always true
        # for a list, so an empty selection crashed on `totle[0]`.
        if not total:
            return
        total_num = int(total[0].find_all('strong')[0].get_text())
        # Ceiling division: pages needed at PAGE_SIZE estates per page.
        total_page = -(-total_num // self.PAGE_SIZE)

        if '.html' in current_url:
            # URLs like .../foo.html (page 1) or .../foo_p3.html.
            slug = current_url.split('/')[-1].replace('.html', '')
            if 'p' in slug:
                current_page = int(slug.split('p')[-1])
                if current_page < total_page:
                    next_url = current_url.replace(
                        '_p' + str(current_page) + '.html',
                        '_p' + str(current_page + 1) + '.html')
                    yield scrapy.Request(next_url, callback=self.parse_area_estate, meta=meta)
            else:
                # Page 1 carries no page marker; page 2 appends '_p2'.
                next_url = current_url.replace(slug, slug + '_p2')
                yield scrapy.Request(next_url, callback=self.parse_area_estate, meta=meta)
        else:
            # URLs like .../loupan/ (page 1) or .../loupan/p3/.
            # NOTE: test 'loupan' first — 'p' in 'loupan' is also true.
            slug = current_url.split('/')[-2]
            if slug == 'loupan':
                next_url = current_url.replace(slug, slug + '/p2')
                yield scrapy.Request(next_url, callback=self.parse_area_estate, meta=meta)
            elif 'p' in slug:
                current_page = int(slug.split('p')[-1])
                if current_page < total_page:
                    next_url = current_url.replace('/p' + str(current_page),
                                                   '/p' + str(current_page + 1))
                    yield scrapy.Request(next_url, callback=self.parse_area_estate, meta=meta)
