# -*- coding: utf-8 -*-
import scrapy
import copy
import math
import uuid


class ErshoufangSpider(scrapy.Spider):
    """Crawl second-hand housing ("ershoufang") listings from lianjia.com.

    Crawl flow:
        national city index  ->  parse
        per-city listing URL ->  parse_city      (computes pagination)
        one listing page     ->  parse_per_page  (one request per house)
        house detail page    ->  parse_house_detail (yields the item dict)

    Each item carries province, city, the listing URL and the detail
    fields scraped from the house page, plus a generated ``_id``.
    """

    name = 'ershoufang'
    allowed_domains = ['lianjia.com']
    start_urls = ['https://www.lianjia.com/city/']

    # XPath for each house-detail field, keyed by the output item key.
    # Centralized here so parse_house_detail is a simple data-driven loop.
    _DETAIL_XPATHS = {
        "desc": "//h1[@class='main']/text()",                                          # listing headline
        "totalPrice": "//span[@class='total']/text()",                                 # total price
        "unitPrice": "//span[@class='unitPriceValue']/text()",                         # price per m^2
        "area": "//div[@class='area']/div[@class='mainInfo']/text()",                  # floor area
        "type": "//div[@class='area']/div[@class='subInfo']/text()",                   # building type/year
        "aspect": "//div[@class='type']/div[@class='mainInfo']/text()",                # orientation
        "room": "//div[@class='room']/div[@class='mainInfo']/text()",                  # room layout
        "floor": "//div[@class='room']/div[@class='subInfo']/text()",                  # floor info
        "region": "//div[@class='areaName']/span[@class='info']/a[1]/text()",          # district
        "around": "//div[@class='communityName']/a[contains(@class,'info')]/text()",   # community name
    }

    def parse(self, response):
        """Extract every city link from the national city index page.

        Yields one request per city, pointed at that city's second-hand
        listing URL, with province/city carried along in ``meta``.
        """
        for province in response.xpath("//div[@class='city_province']"):
            province_name = province.xpath("./div/text()").get()
            for city in province.xpath("./ul/li/a"):
                city_url = city.xpath("./@href").get()
                # Two kinds of links appear: plain city sites and "fang"
                # sites that have no second-hand section — skip the latter.
                # Also skip a missing href (defensive: .get() may be None).
                if not city_url or "fang" in city_url:
                    continue
                item = {
                    "province": province_name,
                    "city": city.xpath("./text()").get(),
                }
                # Turn the city link into its second-hand listing URL.
                # NOTE(review): "co32" presumably selects a sort order on
                # the site — kept verbatim from the original; confirm.
                yield scrapy.Request(
                    url=city_url + "ershoufang/co32/",
                    callback=self.parse_city,
                    # deepcopy so concurrent callbacks never share one dict
                    meta={"item": copy.deepcopy(item)},
                )

    def parse_city(self, response):
        """Fan out one request per result page of a city's listing.

        Reads the total listing count, derives the page count (30 listings
        per page, the site caps browsing at 100 pages) and requests each
        page via a templated URL.
        """
        info = response.meta.get("item")
        total_text = response.xpath(
            "//h2[contains(@class, 'total')]/span/text()").get()
        if total_text is None:
            # Layout change or anti-bot interstitial: nothing to paginate.
            # (BUG FIX: the original crashed with int(None) here.)
            return
        all_count = int(total_text)
        # BUG FIX: the original used math.floor, which silently dropped the
        # final partial page (e.g. 31 listings -> 1 page instead of 2).
        max_page = min(100, math.ceil(all_count / 30))
        # Rewrite ".../ershoufang/co32/" into ".../ershoufang/pg{N}co32/".
        template_parts = response.url.split("/")
        template_parts[-2] = "pg{}co32"
        template_url = "/".join(template_parts)
        for page in range(1, max_page + 1):
            yield scrapy.Request(
                url=template_url.format(page),
                callback=self.parse_per_page,
                meta={"item": copy.deepcopy(info)},
            )

    def parse_per_page(self, response):
        """Request the detail page of every house on one listing page."""
        info = response.meta.get("item")
        for house in response.xpath("//ul[@class='sellListContent']/li"):
            house_url = house.xpath("./a/@href").get()
            if not house_url:
                # Ad/placeholder <li> without a link — skip instead of
                # passing url=None to scrapy.Request (which raises).
                continue
            info["url"] = house_url
            yield scrapy.Request(
                url=house_url,
                callback=self.parse_house_detail,
                meta={"item": copy.deepcopy(info)},
            )

    def parse_house_detail(self, response):
        """Scrape the detail fields of one house page and return the item.

        BUG FIX: the original wrapped each lookup in ``try/except`` with a
        bare ``except``, but ``Selector.get()`` returns ``None`` instead of
        raising — the except branches were dead, missing fields became
        ``None``, and the address concatenation then raised ``TypeError``.
        ``get(default="")`` produces the empty string the original intended.
        """
        item = response.meta.get("item")
        for key, xpath in self._DETAIL_XPATHS.items():
            item[key] = response.xpath(xpath).get(default="")
        # Full address = province + city + district + community; treat any
        # missing piece (e.g. province was None upstream) as empty.
        item["address"] = "".join(
            item.get(part) or ""
            for part in ("province", "city", "region", "around")
        )
        # NOTE(review): uuid1 embeds host MAC + timestamp; kept for
        # compatibility with existing IDs — consider uuid4 if IDs should
        # be unguessable.
        item["_id"] = uuid.uuid1().hex
        # Return a private copy so downstream pipelines can't mutate the
        # dict shared via meta.
        return copy.deepcopy(item)
