import scrapy
import requests
import re
from lianjia.items import LianjiaItem
import time
import json


class LjSpider(scrapy.Spider):
    """Spider that scrapes rental listings (zufang) from sz.lianjia.com.

    Flow: fetch page 1 to learn the total listing count, request every
    result page, extract per-listing fields, then follow each listing's
    detail page for floor/broker information.
    """

    name = 'lj'
    allowed_domains = ['sz.lianjia.com']
    # start_urls = ['https://sz.lianjia.com/zufang']
    base_url = "https://sz.lianjia.com/zufang/pg{}"

    # Every item field, reset to None before extraction so partially
    # populated listings still produce a complete record.
    _ITEM_FIELDS = (
        "type", "room_name", "room_dir", "room_addr_qu", "room_addr_zhan",
        "room_addr_xq", "room_area", "room_type", "room_price",
        "room_floor", "room_broker", "broker_no",
    )

    # Build the pagination URLs.
    def start_requests(self):
        """Yield one request per result page.

        Fetches page 1 synchronously (with requests) to read the total
        listing count from the page header, then derives the page count
        assuming 30 listings per page.
        """
        response = requests.get(self.base_url.format(1)).text
        # Total number of listings shown in the page header.
        counts = re.findall(
            "<span class=\"content__title--hl\">(.*?)</span>", response, re.S)
        # BUG FIX: re.findall(...)[0] raised IndexError when the markup
        # changed; fail with a log message instead of a traceback.
        if not counts:
            self.logger.error("listing count not found on first page")
            return
        max_rooms = int(counts[0])
        # 30 listings per page; +1 covers a partial final page.
        max_pg = int(max_rooms / 30) + 1
        # Request every page; parse() extracts the data.
        for i in range(1, max_pg + 1):
            yield scrapy.Request(url=self.base_url.format(i),
                                 callback=self.parse, dont_filter=True)

    # Extract listing data from one result page.
    def parse(self, response):
        """Extract per-listing fields; yields items or detail-page requests.

        Ad blocks are too irregular to parse, so only divs whose
        data-house_code contains 'SZ' (real listings) are selected.
        """
        div_ls = response.xpath(
            "//div[@class='content__list']/div[contains(@data-house_code,'SZ')]")
        for div in div_ls:
            # Per-item try: one malformed listing must not abort the
            # remaining listings on the page (the old code wrapped the
            # whole loop, losing the rest of the page on first error).
            try:
                item = LianjiaItem()
                for field in self._ITEM_FIELDS:
                    item[field] = None
                # Title text: "type·name ... direction".
                # BUG FIX: the old guard checked .../title']/text() while
                # extracting .../title']/a/text(); the mismatch could let
                # extract_first() return None and crash on .strip().
                title_sel = div.xpath(
                    ".//p[@class='content__list--item--title']/a/text()")
                title_ls = (title_sel.extract_first().strip().split()
                            if title_sel else None)
                if title_ls:
                    type_room_name = title_ls[0].split("·")
                    # Leading segment is the rental type when present.
                    if len(type_room_name) > 1:
                        item["type"] = type_room_name[0]
                        # BUG FIX: join all remaining segments; the old
                        # code concatenated only [1] and [-1], dropping
                        # middle segments when the name contained extra "·".
                        item["room_name"] = "".join(type_room_name[1:])
                    else:
                        # No rental type; the whole segment is the name.
                        item["room_name"] = type_room_name[0]
                    item["room_dir"] = title_ls[-1]
                # Address links: district / metro stop / estate name.
                addr_sel = div.xpath(
                    ".//p[@class='content__list--item--des']/a/text()")
                addr_ls = addr_sel.extract() if addr_sel else None
                if addr_ls:
                    item["room_addr_qu"] = addr_ls[0]
                    item["room_addr_zhan"] = addr_ls[1]
                    item["room_addr_xq"] = addr_ls[-1]
                # Free-text fragments: area and layout live in the last 4.
                desc_sel = div.xpath(
                    ".//p[@class='content__list--item--des']/text()")
                desc_ls = desc_sel.extract()[-4:] if desc_sel else None
                if desc_ls:
                    item["room_area"] = desc_ls[0].strip()
                    item["room_type"] = desc_ls[-2].strip()
                # BUG FIX: price extraction was nested inside the desc_ls
                # check, so listings without desc text lost their price.
                price_sel = div.xpath(
                    ".//span[@class='content__list--item-price']/em/text()")
                item["room_price"] = (price_sel.extract_first()
                                      if price_sel else None)
                # Follow the detail page when a link exists, carrying the
                # partially filled item along in meta.
                desc_url = div.xpath(
                    ".//p[@class='content__list--item--title']/a/@href"
                ).extract_first()
                if desc_url:
                    yield scrapy.Request(
                        url="https://sz.lianjia.com" + desc_url,
                        callback=self.parse_item,
                        meta={"item": item}, dont_filter=True)
                else:
                    yield item
            except Exception as ex:
                # BUG FIX: time.time() is a float; the old
                # `time.time() + " " + ...` raised TypeError, so the
                # error logger itself crashed. Convert explicitly.
                with open("error.log", "a", encoding="utf8") as f:
                    f.write(str(time.time()) + " " + str(ex) + "\n")

    # Extract detail-page data.
    def parse_item(self, response):
        """Fill floor, broker name, and broker number, then yield the item."""
        item = response.meta["item"]
        floor_sel = response.xpath("//li[@class='floor']/span/text()")
        # Floor text looks like "... N/M"; keep the part before the slash.
        item["room_floor"] = (
            floor_sel.extract()[-1].split(" ")[-1].split("/")[0]
            if floor_sel else None)
        broker_sel = response.xpath("//span[@class='contact_name']/text()")
        item["room_broker"] = broker_sel.extract_first() if broker_sel else None
        no_sel = response.xpath(
            "//div[@class='duty-pic duty-pic-ts7']/span/text()")
        item["broker_no"] = no_sel.extract_first() if no_sel else None
        yield item
