# coding:utf-8

import scrapy
import bs4
from scrapy.contrib.spiders import CrawlSpider
from ..items import AnjukeItem

class AnjukeSpider(CrawlSpider):
    """Spider crawling city / district / estate listings from anjuke.com.

    Flow: city index page -> per-city "new homes" site -> district list
    -> paginated estate listing pages, yielding one AnjukeItem per estate.

    NOTE(review): overriding ``parse`` on a ``CrawlSpider`` conflicts with
    its rule-based crawling, and ``scrapy.contrib.spiders`` is a removed
    import path in modern Scrapy (``scrapy.spiders`` is current). A plain
    ``scrapy.Spider`` base would be conventional here — confirm before
    changing, as that touches the file-level import.
    """

    name = 'anjukespider'
    allowed_domains = ['anjuke.com']
    start_urls = ['http://www.anjuke.com/sy-city.html']

    @staticmethod
    def _extract_cities(dl_boxes, excluded=()):
        """Collect ``[city, city_id, city_url]`` triples from city <dl> boxes.

        ``city_id`` is derived by stripping the ``http://`` scheme and the
        ``.anjuke.com`` domain suffix from the city's URL. Cities whose
        display name appears in *excluded* are skipped.
        """
        rows = []
        for box in dl_boxes:
            for link in box.find_all('a'):
                city = link.get_text()
                if city in excluded:
                    continue
                city_url = link.get('href')
                city_id = city_url.replace('http://', '').replace('.anjuke.com', '')
                rows.append([city, city_id, city_url])
        return rows

    def parse(self, response):
        """Parse the site-wide city index and schedule one request per city."""
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # The index splits cities into two columns. '吴江' is excluded from
        # the right-hand column only — original behavior kept as-is.
        citybox = self._extract_cities(soup.select('.left_side > dl '))
        citybox += self._extract_cities(soup.select('.right_side > dl '),
                                        excluded=('吴江',))

        for city, city_id, city_url in citybox:
            print(city, city_id, city_url)
            citys = {
                'website': '安居客', 'web_url': 'anjuke.com',
                'city': city, 'city_id': city_id,
            }
            # Restricted to a single city ('安阳') — presumably a debug/test
            # limit; drop this guard to crawl every city.
            if city == '安阳':
                yield scrapy.Request(city_url, callback=self.parse_getcityPage_url, meta=citys)

    def parse_getcityPage_url(self, response):
        """From a city's landing page, follow its '新 房' (new homes) link."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # The second 'a_navnew' navigation link is expected to point at the
        # city's new-homes sub-site; the label check guards against layout drift.
        nav_link = soup.find_all('a', class_='a_navnew')[1]
        if nav_link.get_text().strip() == '新 房':
            yield scrapy.Request(nav_link.get('href'), callback=self.parse_cityArea_url, meta=meta)

    def parse_cityArea_url(self, response):
        """Parse the new-homes page and schedule one request per district."""
        meta = response.meta
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        # The first '.filter' block holds the district links. Mutating the
        # shared meta dict per iteration is safe: scrapy.Request copies the
        # meta mapping it is given.
        for link in soup.select('.filter')[0].find_all('a'):
            meta['area'] = link.get_text()
            yield scrapy.Request(link.get('href'), callback=self.parse_city_estate, meta=meta)

    def parse_city_estate(self, response):
        """Yield one AnjukeItem per estate on this page, then follow pagination."""
        meta = response.meta
        area = meta['area']
        soup = bs4.BeautifulSoup(response.body, 'lxml')

        for link in soup.find_all('a', class_='items-name'):
            estate = link.get_text()
            estate_url = link.get('href')
            item = AnjukeItem()
            item['website'] = meta['website']
            item['web_url'] = meta['web_url']
            item['city'] = meta['city']
            item['city_id'] = meta['city_id']
            item['area'] = area
            item['estate'] = estate
            # Estate id is the final URL path component minus '.html'.
            item['estate_id'] = estate_url.split('/')[-1].replace('.html', '')
            item['estate_url'] = estate_url
            print(area, estate, estate_url)
            yield item

        # Follow the "next page" link if present. (Fixed: the original
        # queried the soup twice and re-checked truthiness of an element
        # it had already indexed.)
        next_pages = soup.find_all('a', class_='next-page')
        if next_pages:
            yield scrapy.Request(next_pages[0].get('href'), callback=self.parse_city_estate, meta=meta)
