import re
import time

import scrapy
import urllib.parse
from scrapy_redis.spiders import RedisSpider
from conch_findroom.items import RentDataItem, BorderItem

class ZfSpiderSpider(RedisSpider):
    """Distributed (scrapy-redis) spider for ke.com rental listings.

    Crawl path: city index page -> rental site per city -> district
    (level-2 filter) -> business area (level-3 filter) -> listing pages
    (with pagination) -> listing detail -> broker (agent) profile.

    Yields ``RentDataItem`` for each listing and ``BorderItem`` for each
    broker; the two are linked through ``broker_id``.
    """

    name = 'zf_spider'
    allowed_domains = ['ke.com', 'zu.ke.com', 'dianpu.ke.com']
    # start_urls = ['https://www.ke.com/city/']
    redis_key = "zf_spider:start_urls"  # seed URLs are pushed into Redis under this key

    @staticmethod
    def _strip_label(text, label):
        """Strip any of the characters in *label* from both ends of *text*.

        Returns None when the node was missing (*text* is None) instead of
        raising AttributeError.

        NOTE(review): ``str.strip`` removes a character *set*, not a
        prefix — a value ending in one of the label characters would also
        be trimmed. Appears safe for the current page layout; confirm.
        """
        return text.strip(label) if text is not None else None

    @staticmethod
    def _drop_prefix(text, n=3):
        """Drop the first *n* characters (a "label:" prefix such as
        '用水：') from *text*; return None when the node was missing."""
        return text[n:] if text is not None else None

    def parse(self, response):
        """Parse the national city index and schedule one rental-site
        request per selected city.

        The hard-coded slices deliberately restrict the crawl to a sample
        (letter group "G" -> Guangdong province -> first city).
        """
        print('开启租房爬虫项目数据采集'.center(100, '*'))
        # Letter-grouped city sections (A, B, C, ...)
        lis = response.xpath('//div[contains(@class,"city-item ")]/div[@class="city_list_section"]/ul/li')
        for li in lis[4:5]:  # letter group "G" only
            # Provinces inside this letter group
            divs = li.xpath('.//div[@class="city_list"]/div[@class="city_province"]')
            for div in divs[0:1]:  # Guangdong only
                province = div.xpath('./div[@class="city_list_tit c_b"]/text()').get().strip()  # province name
                citys = div.xpath('./ul/li')  # cities of this province
                for city in citys[:1]:  # first city only (e.g. Guangzhou)
                    city_name = city.xpath('./a/text()').get()   # city display name
                    city_url = city.xpath('./a/@href').get()     # city landing page
                    # Derive the subdomain code, e.g. ".../gz.ke.com" -> "gz"
                    city_code = city_url.split('/')[-1].split('.')[0]

                    # Rental site for this city
                    rent_href = f'https://{city_code}.zu.ke.com/zufang/'
                    yield scrapy.Request(
                        url=rent_href, callback=self.get_area_url,
                        meta={'province': province, 'city_name': city_name}
                    )

    # Collect district (level-2 area filter) links
    def get_area_url(self, response):
        """Parse the city rental page and schedule one request per district."""
        province = response.meta['province']
        city_name = response.meta['city_name']
        lis = response.xpath('//ul[@data-target="area"]/li[contains(@class,"filter__item--level2  ")]/a')
        for li in lis:
            area_name = li.xpath('./text()').get()  # district name
            area_href = li.xpath('./@href').get()   # district listing path (relative)
            area_href = urllib.parse.urljoin(response.url, area_href)  # absolutize

            yield scrapy.Request(
                url=area_href, callback=self.get_shop_url,
                meta={'province': province, 'city_name': city_name, 'area_name': area_name}
            )

    # Collect business-area (level-3 area filter) links
    def get_shop_url(self, response):
        """Parse a district page and schedule one request per business area."""
        province = response.meta['province']
        city_name = response.meta['city_name']
        area_name = response.meta['area_name']
        lis = response.xpath('//ul[@data-target="area"]/li[contains(@class,"filter__item--level3  ")]/a')
        for li in lis:
            shop_name = li.xpath('./text()').get()  # business-area name
            shop_href = li.xpath('./@href').get()   # business-area listing path (relative)
            shop_href = urllib.parse.urljoin(response.url, shop_href)  # absolutize

            yield scrapy.Request(
                url=shop_href, callback=self.get_detail_url,
                meta={'province': province, 'city_name': city_name, 'area_name': area_name, 'shop_name': shop_name}
            )

    # Collect detail-page links and follow pagination
    def get_detail_url(self, response):
        """Parse one listing page: schedule a detail request per listing
        card, then schedule every pagination link (Scrapy's dupe filter
        prevents re-crawling pages already seen)."""
        province = response.meta['province']
        city_name = response.meta['city_name']
        area_name = response.meta['area_name']
        shop_name = response.meta['shop_name']

        divs = response.xpath('//div[@class="content__list"]/div[@class="content__list--item"]')
        for div in divs:
            title = div.xpath('./a/@title').get()       # listing title
            detail_url = div.xpath('./a/@href').get()   # listing detail path (relative)
            detail_url = urllib.parse.urljoin(response.url, detail_url)

            # Short URLs belong to serviced-apartment listings we do not
            # want. Skip just that card with `continue` — a `return` here
            # would abort the generator, dropping the rest of the page's
            # listings AND the pagination below.
            if len(detail_url) > 45:
                yield scrapy.Request(
                    url=detail_url, callback=self.get_detail_data,
                    meta={'province': province, 'city_name': city_name, 'area_name': area_name, 'shop_name': shop_name}
                )
            else:
                continue

        # Pagination: follow every page link in the pager
        for li in response.xpath('//div[@id="content"]/div/ul[2]/li'):
            next_url = li.xpath('./a/@href').get()
            next_url = urllib.parse.urljoin(response.url, next_url)
            yield scrapy.Request(
                url=next_url, callback=self.get_detail_url,
                meta={'province': province, 'city_name': city_name, 'area_name': area_name, 'shop_name': shop_name}
            )

    # Extract the detail-page data
    def get_detail_data(self, response):
        """Scrape one listing detail page into a ``RentDataItem``.

        If the page carries a broker id, the item is handed to
        :meth:`get_broker_data` via meta and emitted there; otherwise a
        placeholder broker record is emitted here together with the item.
        """
        rent_id = response.url.split('/')[-1].split('.')[0]  # listing id from the URL
        province = response.meta['province']    # province name
        city_name = response.meta['city_name']  # city name
        area_name = response.meta['area_name']  # district name
        shop_name = response.meta['shop_name']  # business-area name

        title = response.css('p.content__title::text').get()  # listing title
        # Community name: title is usually "<mode>·<community> <layout> ...";
        # take the first token and drop the leading "<mode>·" part if present.
        community = (title or '').split(' ')[0]
        if '·' in community:
            community = community.split('·')[1]
        # Maintenance date of the listing
        subtitle = response.xpath('//div[@class="content__subtitle"]/text()[1]').get()
        found = re.findall(r'房源维护时间：(.*)', subtitle or '')
        maintain_time = found[0].strip() if found else None
        # Government verification code of the listing
        code_text = response.xpath('//i[@class="gov_title"]/text()[2]|//i[@class="house_code"]/text()').get()
        found = re.findall(r'\W+\d+', code_text or '')
        check_code = found[0].strip('：') if found else None
        payment = response.css('ul.table_row li:nth-child(1)::text').get()             # payment plan
        rent_price = response.css('ul.table_row li:nth-child(2)::text').get()          # monthly rent
        deposit = response.css('ul.table_row li:nth-child(3)::text').get()             # deposit
        service_price = response.css('ul.table_row li:nth-child(4)::text').get()       # service fee
        intermediary_price = response.css('ul.table_row li:nth-child(5)::text').get()  # agency fee
        lease_mode = response.css('ul.content__aside__list li:nth-child(1)::text').get()  # whole-flat / shared
        # Housing type (truncated to the leading descriptor)
        house_type = response.xpath('//ul[@class="content__aside__list"]/li[2]/text()').get()
        house_type = house_type[:6] if house_type else None
        # Labeled info fields; helpers return None for missing nodes
        area = self._strip_label(response.xpath('//div[@id="info"]/ul[1]/li[2]/text()').get(), '面积：')       # floor area
        house_face = self._strip_label(response.xpath('//div[@id="info"]/ul[1]/li[3]/text()').get(), '朝向：')  # orientation
        check_in = self._strip_label(response.xpath('//div[@id="info"]/ul[1]/li[6]/text()').get(), '入住：')    # move-in
        floor = self._strip_label(response.xpath('//div[@id="info"]/ul[1]/li[8]/text()').get(), '楼层：')       # floor
        elevator = self._strip_label(response.xpath('//div[@id="info"]/ul[1]/li[9]/text()').get(), '电梯：')    # elevator
        parklot = self._strip_label(response.xpath('//div[@id="info"]/ul[1]/li[11]/text()').get(), '车位：')    # parking
        water = self._drop_prefix(response.xpath('//div[@id="info"]/ul[1]/li[12]/text()').get())     # water supply
        electric = self._drop_prefix(response.xpath('//div[@id="info"]/ul[1]/li[14]/text()').get())  # electricity
        gas = self._drop_prefix(response.xpath('//div[@id="info"]/ul[1]/li[15]/text()').get())       # gas
        heating = self._drop_prefix(response.xpath('//div[@id="info"]/ul[1]/li[17]/text()').get())   # heating
        lease_term = self._strip_label(response.xpath('//div[@id="info"]/ul[2]/li[2]/text()').get(), '租期：')  # lease term
        look_house = self._strip_label(response.xpath('//div[@id="info"]/ul[2]/li[5]/text()').get(), '看房：')  # viewing
        # First photo of the listing
        house_img = response.css('ul.content__article__slide__wrapper div:nth-child(1) img::attr(src)').get()
        rent_item = RentDataItem(
            rent_id=rent_id, province=province, city_name=city_name, area_name=area_name, shop_name=shop_name,
            title=title,
            community=community, maintain_time=maintain_time, check_code=check_code, payment=payment,
            rent_price=rent_price,
            deposit=deposit, service_price=service_price, intermediary_price=intermediary_price,
            lease_mode=lease_mode,
            house_type=house_type, area=area, house_face=house_face, check_in=check_in, floor=floor,
            elevator=elevator,
            parklot=parklot, water=water, electric=electric, gas=gas, heating=heating, lease_term=lease_term,
            look_house=look_house, house_img=house_img
        )

        # Broker (agent) of this listing
        broker_id = response.xpath('//div[@class="content__subtitle"]/div/@data-appoint-uc-id').get()
        if broker_id is not None:
            broker_url = 'https://dianpu.ke.com/' + broker_id
            # Hand the rent item over; it is emitted in get_broker_data
            # once the broker_id can be attached.
            yield scrapy.Request(
                url=broker_url, callback=self.get_broker_data, meta={'rent_item': rent_item}
            )
        else:
            # No broker attached: synthesize a unique placeholder id from
            # the current millisecond timestamp (13 digits), emit an empty
            # broker record, and emit the rent item directly.
            broker_id = int(time.time() * 1000)
            item = BorderItem(
                broker_id=broker_id, broker_name='', contact='', shop_group='',
                service_years='', personal_score='', broker_img=''
            )
            yield item

            rent_item['broker_id'] = broker_id
            yield rent_item

    # Extract broker information
    def get_broker_data(self, response):
        """Scrape the broker profile page, emit the ``BorderItem``, then
        emit the ``RentDataItem`` carried in meta with the broker id set."""
        broker_id = re.findall(r'\d+', response.url)[0]               # broker id from the URL
        broker_name = response.css('span.agent-name::text').get()    # broker name
        contact = response.css('span.agent-tel::text').get()         # phone number
        shop_group = response.css('span.map-text::text').get()       # shop / team
        service_years = response.css('ul.info-list li:nth-child(1) span:nth-child(2)::text').get()   # years of service
        personal_score = response.css('ul.info-list li:nth-child(2) span:nth-child(2)::text').get()  # performance score
        broker_img = response.css('div.left-part img::attr(src)').get()  # broker photo

        item = BorderItem(
            broker_id=broker_id, broker_name=broker_name, contact=contact, shop_group=shop_group,
            service_years=service_years, personal_score=personal_score, broker_img=broker_img
        )
        yield item

        # Link and emit the rent item that triggered this request
        rent_item = response.meta['rent_item']
        rent_item['broker_id'] = broker_id
        yield rent_item



