# -*- coding: utf-8 -*-
import scrapy
from ..items import LianjiaItem

class LianjiaSpider(scrapy.Spider):
    """Crawl second-hand housing (ershoufang) listings from nc.lianjia.com.

    Flow: ``parse`` discovers district (qu) links on the start page and
    schedules each district's listing page into ``get_house``, which scrapes
    one listing page and schedules the next page until a page comes back
    empty.  ``get_jiedao`` (street-level drill-down) is currently not wired
    into the flow — ``parse`` goes straight to ``get_house``.
    """
    name = 'lianjia'
    allowed_domains = ['nc.lianjia.com']
    start_urls = ['http://nc.lianjia.com/ershoufang/']

    def parse(self, response):
        """Extract district links and schedule each district's first page."""
        qu_list = response.xpath('//div[@data-role="ershoufang"]/div/a')
        for qu in qu_list:
            # Build a fresh dict per district so scheduled requests never
            # share mutable state.
            meta = {
                'qu_name': qu.xpath('./text()').extract_first(),
                'qu_url': response.urljoin(qu.xpath('./@href').extract_first()),
                'page_num': 1,
            }
            yield scrapy.Request(url=meta['qu_url'], meta={'temp': meta},
                                 callback=self.get_house)

    def get_jiedao(self, response):
        """Extract street (jiedao) links within a district and schedule them.

        NOTE(review): not reachable from ``parse`` at present; kept for when
        street-level crawling is enabled.
        """
        base = response.meta['temp']
        jiedao_list = response.xpath('//div[@data-role="ershoufang"]/div[2]/a')
        for jiedao in jiedao_list:
            jiedao_url = response.urljoin(jiedao.xpath('./@href').extract_first())
            # Copy the meta dict per request: Scrapy schedules requests
            # asynchronously, so mutating one shared dict would make every
            # callback see the last street's name/url.
            temp = dict(base)
            temp['jiedao_name'] = jiedao.xpath('./text()').extract_first()
            temp['jiedao_url'] = jiedao_url
            temp['page_num'] = 1
            yield scrapy.Request(url=jiedao_url, meta={'temp': temp},
                                 callback=self.get_house)
            # NOTE(review): debug leftover — only the first street is ever
            # scheduled.  Remove this break to crawl all streets.
            break

    def get_house(self, response):
        """Scrape all listings on one page, then schedule the next page."""
        temp = response.meta['temp']
        # Defensive: Scrapy normally filters non-200 responses before the
        # callback, but bail out explicitly just in case.
        if response.status != 200:
            return
        houses = response.xpath('//ul[@class="sellListContent"]/li')
        if not houses:
            # Empty page: past the last page of results — stop paginating.
            return

        for house in houses:
            item = LianjiaItem()
            item['qu_name'] = temp['qu_name']
            item['qu_url'] = temp['qu_url']
            item['detail_url'] = house.xpath('./a[1]/@href').extract_first()
            item['image_url'] = house.xpath(
                './a[1]/img[@class="lj-lazy"]/@data-original').extract_first()
            info = house.xpath('./div[contains(@class,"info")]')
            item['house_name'] = info.xpath(
                './div[@class="title"]/a/text()').extract_first()
            item['village_name'] = info.xpath(
                './/div[@class="positionInfo"]/a[1]/text()').extract_first()
            house_info = info.xpath(
                './/div[@class="houseInfo"]/text()').extract_first()
            # extract_first() may return None when the node is missing;
            # guard before splitting to avoid AttributeError.
            parts = house_info.split('|') if house_info else []
            if len(parts) >= 2:
                item['house_layout'] = parts[0]
                item['house_space'] = parts[1].strip().replace('平米', '')
            else:
                item['house_layout'] = None
                item['house_space'] = None
            item['total_price'] = info.xpath(
                './/div[@class="totalPrice"]/span/text()').extract_first()
            item['unit_price'] = info.xpath(
                './/div[@class="unitPrice"]/@data-price').extract_first()
            yield item

        page_num = temp['page_num'] + 1
        current_url = response.request.url.rstrip('/')
        segments = current_url.split('/')
        if segments[-1].startswith('pg'):
            # Already on a paginated URL (.../pgN): replace the LAST segment.
            # (The old code replaced segments[-2], which clobbered the
            # district segment when the URL had no trailing slash.)
            segments[-1] = 'pg%d' % page_num
            next_page = '/'.join(segments)
        else:
            # First page (.../ershoufang/ or .../district/): append pgN.
            next_page = response.urljoin('pg%d' % page_num)
        # Fresh meta dict per request — never mutate the shared one.
        next_meta = dict(temp, page_num=page_num)
        yield scrapy.Request(url=next_page, meta={'temp': next_meta},
                             callback=self.get_house)






