import scrapy
import random, time
from ..settings import SLEEP_SATRT, SLEEP_STOP
from ..items import HouseSpiderItem
# Sleep-interval bounds (seconds) used to throttle requests.
# NOTE(review): "SATRT" is a typo, but it matches the name actually defined
# in settings.py — fix it there first before correcting it here.
start = SLEEP_SATRT
stop = SLEEP_STOP


class FangSpider(scrapy.Spider):
    """Crawl rental-housing listings from zu.fang.com.

    Generates list-page URLs per Beijing district, parses every listing's
    summary fields, then follows each listing to its detail page and yields
    a completed item to the pipelines.
    """

    name = 'fang'
    allowed_domains = ['fang.com']
    # start_urls = ['http://fang.com/']
    baseUrl = "https://zu.fang.com"

    # URL path of each district's listing section on zu.fang.com.
    urlDir = {
        "朝阳": "/house-a01/",
        "海淀": "/house-a00/",
        "丰台": "/house-a06/",
        "东城": "/house-a02/",
        "西城": "/house-a03/",
        "石景山": "/house-a07/",
        "昌平": "/house-a012/",
        "大兴": "/house-a0585/",
        "通州": "/house-a010/",
        "顺义": "/house-a011/",
        "房山": "/house-a08/",
        "密云": "/house-a013/",
        "门头沟": "/house-a09/",
        "怀柔": "/house-a014/",
        "延庆": "/house-a015/",
        "平谷": "/house-a016/"
    }
    region = "不限"
    # Number of list pages generated per district.
    page = 30

    def start_requests(self):
        """
        Spider entry point: build every district's list-page URLs and request
        them with a random delay between requests.

        :return: generator of scrapy.Request
        """
        url_list = []
        for district in self.urlDir:
            for index in range(self.page):
                if index == 0:
                    # First page of a section is ".../i3/".
                    url_list.append(self.baseUrl + self.urlDir[district] + "i3/")
                else:
                    # Subsequent pages are ".../i3<page>/", numbered from 2.
                    url_list.append(self.baseUrl + self.urlDir[district] + "i3" + str(index + 1) + "/")
        # NOTE(review): the [:2] slice restricts the crawl to the first two
        # URLs — looks like a debugging limit; drop it for a full crawl.
        for url in url_list[:2]:
            time.sleep(self.random_sleep_time(start, stop))
            yield scrapy.Request(url=url, dont_filter=True)

    def parse(self, response):
        """
        Parse a list page: extract summary fields for every listing and
        follow each one to its detail page.

        :param response: response object for a list page
        :return: generator of detail-page requests
        """
        dl_list = response.xpath('//div[@class="houseList"]/dl')

        for dl in dl_list:
            # Accumulate the fields declared in items.py.
            item = HouseSpiderItem()
            # Listing title.
            title = dl.xpath('.//p[@class="title"]/a/text()').extract_first()
            try:
                item['title'] = title.strip() if title else ''
            except Exception:
                # Record the page that failed to parse and skip this entry.
                with open('error_url.txt', 'a+') as f:
                    f.write(response.url)
                    f.write('\n')
                continue

            # List value containing: rental type, layout, size, orientation.
            item['info_list'] = dl.xpath('.//p[@class="font15 mt12 bold"]/text()').extract()

            # Address.
            item['address'] = dl.xpath('.//dd[@class="info rel"]/p[@class="gray6 mt12"]//text()').extract()

            # Transportation.
            item['traffic'] = dl.xpath('.//p[@class="mt12"]/span[@class="note subInfor"]//text()').extract()

            # Price.
            item['price'] = dl.xpath('.//span[@class="price"]/text()').extract_first()

            # Relative URL of the detail page.
            part_url = dl.xpath('.//p[@class="title"]/a/@href').extract_first()
            # Only follow listings that actually expose a detail link.
            if part_url:
                url = self.baseUrl + part_url
                item['url'] = url

                time.sleep(self.random_sleep_time(start, stop))
                # Pass the item under an explicit meta key; passing the item
                # itself as meta lets scrapy-internal keys (depth,
                # download_timeout, ...) leak into the scraped data.
                yield response.follow(url=url, callback=self.parse_detail,
                                      meta={'item': item})

    def parse_detail(self, response):
        """
        Parse a detail page and complete the item started in parse().

        :param response: response object for a detail page
        :return: generator yielding the finished item
        """
        # Item handed over by parse(); assumes the detail fields below are
        # declared on HouseSpiderItem — TODO confirm against items.py.
        item = response.meta['item']
        # Property highlights.
        item['liangdian'] = response.xpath('//li[contains(@class, "fyld")]/div[2]/text()').extract_first()
        # Nearby facilities.
        item['peitao'] = response.xpath('//li[contains(@class, "zbpt")]/div[2]//text()').extract_first()
        # Transportation.
        item['chuxing'] = response.xpath('//li[contains(@class, "jtcx")]/div[2]/text()').extract_first()
        # In-house facilities.
        item['sheshi'] = response.xpath('//div[contains(@class, "zf_new_ptss")]/div[2]/ul//text()').extract()
        # Publish time. The 'pulish_time' key keeps its original misspelling
        # for compatibility with items.py and downstream pipelines.
        item['pulish_time'] = response.xpath('//p[contains(@class, "fybh-zf")]/span[2]/text()').extract_first()
        # Agent name.
        item['people_name'] = response.xpath('//span[@class="zf_jjname"]//text()').extract()
        # Phone number.
        item['phone_num'] = response.xpath('//p[@class="text_phone"]/text()').extract_first()
        self.logger.debug('scraped item: %s', item)
        # Hand the finished item to the pipelines — the original left this
        # commented out, so no data ever reached them.
        yield item

    def random_sleep_time(self, start, stop):
        """
        Random sleep duration used to throttle the crawl rate.

        :param start: lower bound in seconds
        :param stop: upper bound in seconds
        :return: float drawn uniformly from [start, stop], in seconds
        """
        return random.uniform(start, stop)
