# -*- coding: utf-8 -*-
import scrapy
import random, time, re
from house_spider.utils.my_rfdpupefilter import get_set_data
from ..settings import SLEEP_SATRT, SLEEP_STOP

# Re-export the sleep-interval bounds from settings under shorter module names.
# NOTE(review): these aliases are not referenced anywhere in this file —
# start_requests hardcodes random_sleep_time(1, 2) instead. Confirm which
# range is actually intended. ("SATRT" is a typo in settings; the name must
# match the import above, so it is kept here.)
start = SLEEP_SATRT
stop = SLEEP_STOP


class AnjukeSpider(scrapy.Spider):
    """Crawl Beijing rental listings from bj.zu.anjuke.com.

    Visits up to ``page`` listing pages per district, parses each house card,
    then follows the listing URL to its detail page to collect the publish
    time and furnishing info before emitting the item.
    """

    name = 'anjuke'
    allowed_domains = ['anjuke.com']

    # Base URL for the Beijing rental listings.
    baseUrl = "https://bj.zu.anjuke.com/fangyuan"

    # URL path segment for each district (key: district name in Chinese).
    # These values are also used in parse() to map a response URL back to
    # its district, so they must match the request URLs exactly.
    urlDir = {
        "朝阳": "/chaoyang/",
        "海淀": "/haidian/",
        "东城": "/dongcheng/",   # bugfix: was "/dongchenga/" (typo'd slug)
        "西城": "/xicheng/",
        "丰台": "/fengtai/",
        "通州": "/tongzhou/",
        "石景山": "/shijingshan/",
        "昌平": "/changping/",
        "大兴": "/daxing/",
        "顺义": "/shunyi/",
        "房山": "/fangshan/",
        "门头沟": "/mentougou/",
        "密云": "/miyun/",
        "怀柔": "/huairou/",
        "平谷": "/pinggu/",      # bugfix: was "/pinggua/" (typo'd slug)
        "延庆": "/yanqing/",
    }

    # Number of listing pages to crawl per district.
    page = 30

    # Dedup filter data (set of already-seen entries) loaded by the project helper.
    set_data = get_set_data()

    def start_requests(self):
        """Spider entry point: yield one request per district per page.

        :return: generator of scrapy.Request for every listing page
        """
        for path in self.urlDir.values():
            # Pages are 1-based: /<district>/p1/ ... /p<page>/.
            # (The original special-cased page 1 but built the identical URL.)
            for page_no in range(1, self.page + 1):
                url = "{}{}p{}/".format(self.baseUrl, path, page_no)
                # Throttle politely between requests.
                # NOTE(review): hardcoded (1, 2) — the module-level
                # start/stop aliases from settings are unused; confirm
                # which range is intended. Also, time.sleep here blocks
                # the whole process; DOWNLOAD_DELAY would be preferable.
                time.sleep(self.random_sleep_time(1, 2))
                yield scrapy.Request(url=url)

    def parse(self, response):
        """Parse one listing page; follow every house card to its detail page.

        :param response: listing-page response
        :return: generator of scrapy.Request (callback: parse_detail)
        """
        page_url = response.url

        # Map the response URL back to its district name via the path segment.
        region = ''
        for district, path in self.urlDir.items():
            if path in page_url:
                region = district
                break  # path segments are unique; stop at the first match

        for house in response.xpath('//div[@class="zu-itemmod"]'):
            item = dict()

            item['region'] = region          # district the listing belongs to
            item['source'] = self.name       # which site the record came from

            # Title and detail-page URL of the listing.
            item['title'] = house.xpath('.//div[@class="zu-info"]/h3/a/@title').extract_first()
            item['url'] = house.xpath('.//div[@class="zu-info"]/h3/a/@href').extract_first()

            # Layout / size line, e.g. ["2室1厅", "|", "60平米", ...].
            info_list = house.xpath('./div[@class="zu-info"]/p[@class="details-item tag"]//text()').extract()
            if not info_list:
                continue  # card without layout/size info: skip it
            info_list = [i.strip() for i in info_list if i != '|' and i != '\ue147']

            # Room layout.
            item['rooms'] = info_list[0] if info_list else ''

            # Floor area: keep only the first run of digits.
            # bugfix: original indexed info_list[1] behind a len > 0 check
            # (IndexError with a single token) and findall(...)[0] crashed
            # when the text contained no digits.
            area = info_list[1] if len(info_list) > 1 else ''
            area_digits = re.findall(r'.*?(\d+).*?', area)
            item['area'] = area_digits[0] if area_digits else ''

            # Residential-complex name.
            item['region_name'] = house.xpath(
                './div[@class="zu-info"]/address[@class="details-item"]/a/text()').extract_first()

            # Location block + first transit hint, e.g. "  望京 距地铁..." at index 1.
            addr_str = house.xpath('./div[@class="zu-info"]/address[@class="details-item"]/text()').extract()
            # bugfix: original checked len > 0 but indexed [1], and split(' ')[1]
            # unconditionally — both could raise IndexError.
            if len(addr_str) < 2:
                continue
            addr_parts = addr_str[1].strip().split(' ')
            item['block'] = addr_parts[0]
            item['traffic'] = addr_parts[1] if len(addr_parts) > 1 else ''
            item['address'] = item['block'] + '-' + (item['region_name'] or '')

            # Rental type (whole flat / shared).
            item['rent_type'] = house.xpath(
                './div[@class="zu-info"]/p[@class="details-item bot-tag"]/span[1]/text()').extract_first()

            # Orientation.
            item['direction'] = house.xpath(
                './div[@class="zu-info"]/p[@class="details-item bot-tag"]/span[2]/text()').extract_first()

            # Secondary transit info overrides the one from the address line.
            traffic = house.xpath(
                './div[@class="zu-info"]/p[@class="details-item bot-tag"]/span[4]/text()').extract_first()
            if traffic is not None:
                item['traffic'] = traffic

            # Monthly price.
            price = house.xpath('./div[@class="zu-side"]/p//text()').extract()
            item['price'] = price[0] if price else ''

            yield scrapy.Request(item['url'], callback=self.parse_detail, meta=item)

    def parse_detail(self, response):
        """Parse a listing detail page and emit the finished item.

        :param response: detail-page response; response.meta carries the
            partially-filled item from parse()
        :return: generator yielding one item dict
        """
        item = response.meta

        # Publish date, e.g. '发布时间：2019年05月18日' -> Unix timestamp.
        # bugfix: extract_first() may return None and findall() may match
        # nothing — both crashed the original; default to '' instead.
        raw_time = response.xpath('//div[@class="right-info"]/text()').extract_first()
        date_groups = re.findall(r'.*?(\d+).*?(\d+).*?(\d+).*?', raw_time or '')
        if date_groups:
            publish_time = '-'.join(date_groups[0])  # '2019-05-18'
            time_array = time.strptime(publish_time, "%Y-%m-%d")
            item['publish_time'] = int(time.mktime(time_array))  # e.g. 1524822540
        else:
            item['publish_time'] = ''

        # Furnishing / amenities, joined with '-' ('更多' is the "more" toggle).
        furniture_list = response.xpath('//ul[@class="house-info-peitao cf"]//text()').extract()
        item['house_setup'] = '-'.join(i for i in furniture_list if i.isalpha() and i != '更多')

        # bugfix: the item was printed for debugging but never yielded,
        # so pipelines received nothing.
        yield item

    def random_sleep_time(self, start, stop):
        """Random sleep duration used to throttle the crawl rate.

        :param start: lower bound, seconds
        :param stop: upper bound, seconds
        :return: float seconds drawn uniformly from [start, stop]
        """
        return random.uniform(start, stop)
