import scrapy
from copy import deepcopy
from house_data.items import HouseDataItem


class HouseSpider(scrapy.Spider):
    """Spider for lianjia.com second-hand house listings (Dali city).

    Scrapes the listing pages for summary fields, then follows each
    listing's detail page to fill in transaction fields before yielding
    the completed item.
    """

    name = "house"
    allowed_domains = ["lianjia.com"]
    start_urls = ["https://dali.lianjia.com/ershoufang/"]

    def parse(self, response, **kwargs):
        """Parse a listing page: extract summary fields per house and
        schedule the detail-page request; also schedule pages 2-3."""
        data_list = response.xpath('//ul[@class="sellListContent"]//li[@ class="clear LOGVIEWDATA LOGCLICKDATA"]')

        # Scrape the list-page summary and hand off to the detail parser.
        for data in data_list:
            item = HouseDataItem()
            item["title"] = data.xpath("./div[1]/div[1]/a/text()").extract_first()
            item["place"] = "-".join(data.xpath("./div[1]/div[2]/div[1]//a/text()").extract())
            item["house_info"] = data.xpath("./div[1]/div[3]/div[1]/text()").extract_first()
            # extract_first() returns None on no match; guard before concatenating.
            total = data.xpath("./div[1]/div[6]/div[1]//span/text()").extract_first()
            item["total_prices"] = total + "万" if total is not None else None
            item["unit_price"] = data.xpath("./div[1]/div[6]/div[2]//span/text()").extract_first()
            href = data.xpath("./div[1]/div[1]/a/@href").extract_first()
            # Skip entries with no link; urljoin tolerates relative hrefs.
            # deepcopy isolates the item from any later mutation before the
            # scheduled request is actually processed.
            if href:
                yield scrapy.Request(
                    url=response.urljoin(href),
                    callback=self.parse_detail,
                    meta={"item": deepcopy(item)},
                )

        # Follow pagination (pages 2 and 3).
        url = "https://dali.lianjia.com/ershoufang/pg{}/"
        for num in range(2, 4):
            next_url = url.format(num)
            yield scrapy.Request(url=next_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse a house detail page: add transaction fields to the item
        carried in request meta, then yield the completed item."""
        item = response.meta["item"]
        item["time"] = response.xpath('//div[@class="transaction"]/div[2]/ul/li[1]/span[2]/text()').extract_first()
        item["house_type"] = response.xpath('//div[@class="transaction"]/div[2]/ul/li[2]/span[2]/text()').extract_first()
        # Guard against a missing node before calling .strip().
        house_data = response.xpath('//div[@class="transaction"]/div[2]/ul/li[7]/span[2]/text()').extract_first()
        item["house_data"] = house_data.strip() if house_data is not None else None
        # Yield (not print) so the item reaches the configured pipelines.
        yield item