import json
import re
import time

import scrapy
import urllib.parse
from scrapy_redis.spiders import RedisSpider
from conch_findroom.items import SellRoomDataItem,BorderItem,DealRoomDataItem


class EsfSpiderSpider(RedisSpider):
    """Redis-seeded spider for ke.com second-hand housing data.

    Seeded through the ``esf_spider:start_urls`` redis key with the national
    city-list page, it runs two parallel pipelines per city:

    * on-sale houses (``/ershoufang/``) -> ``SellRoomDataItem``
    * sold houses    (``/chengjiao/``)  -> ``DealRoomDataItem``

    Each pipeline drills down city -> district -> town, paginates the listing
    pages, parses every house detail page, and finally scrapes the broker
    (agent) page, yielding a ``BorderItem`` followed by the house item carrying
    the broker id.
    """

    name = 'esf_spider'
    allowed_domains = ['ke.com', 'dianpu.ke.com']
    # start_urls = ['https://www.ke.com/city/']
    redis_key = "esf_spider:start_urls"

    # Label -> SellRoomDataItem field for the "basic attributes" list on an
    # on-sale detail page. Lookups use exact equality: the previous substring
    # tests (e.g. ``value in '套内面积'``) matched any substring, including the
    # empty string, and could assign values to the wrong field.
    _SELL_BASE_FIELDS = {
        '房屋户型': 'house_type',       # house layout
        '所在楼层': 'floor',            # floor
        '建筑面积': 'bullde_area',      # built area
        '套内面积': 'set_area',         # inner area
        '户型结构': 'house_structure',  # layout structure
        '建筑类型': 'bullde_type',      # building type
        '房屋朝向': 'house_face',       # orientation
        '建筑结构': 'bullde_structure', # building structure
        '装修情况': 'decoration',       # decoration
        '别墅类型': 'villa_type',       # villa type
        '梯户比例': 'Echelon_pro',      # elevator/household ratio
        '配备电梯': 'elevator',         # has elevator
    }

    # ------------------------------------------------------------------
    # shared helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _text(sel_list):
        """Return the first extracted text of *sel_list*, stripped, or None.

        Replaces the ``extract_first().strip()`` chains that raised
        ``AttributeError`` (and dropped the whole item) whenever a node was
        missing from the page.
        """
        text = sel_list.extract_first()
        return text.strip() if text is not None else None

    def _parse_broker(self, response):
        """Scrape one broker (agent) page into ``(broker_id, BorderItem)``."""
        broker_id = re.findall(r'\d+', response.url)[0]  # broker id taken from the URL
        item = BorderItem(
            broker_id=broker_id,
            broker_name=response.css('span.agent-name::text').extract_first(),
            contact=response.css('span.agent-tel::text').extract_first(),
            shop_group=response.css('span.map-text::text').extract_first(),
            service_years=response.css('ul.info-list li:nth-child(1) span:nth-child(2)::text').get(),
            personal_score=response.css('ul.info-list li:nth-child(2) span:nth-child(2)::text').get(),
            broker_img=response.css('div.left-part img::attr(src)').extract_first(),
        )
        return broker_id, item

    @staticmethod
    def _placeholder_broker():
        """Return ``(broker_id, BorderItem)`` with empty fields.

        Used when a house has no broker link, so the house record is still
        emitted; the millisecond timestamp acts as a surrogate broker id.
        """
        broker_id = int(time.time() * 1000)
        item = BorderItem(
            broker_id=broker_id, broker_name='', contact='', shop_group='',
            service_years='', personal_score='', broker_img=''
        )
        return broker_id, item

    # ------------------------------------------------------------------
    # entry point
    # ------------------------------------------------------------------

    def parse(self, response):
        """Parse the national city-list page and start both pipelines per city.

        NOTE(review): the ``[4:5]`` / ``[0:1]`` / ``[:1]`` slices restrict the
        crawl to a single letter group / province / city — presumably a
        debugging restriction; widen them for a full crawl.
        """
        print('开启二手房爬虫项目数据采集'.center(100, '='))
        # Letter-grouped sections of the city list.
        letter_groups = response.xpath(
            '//div[contains(@class,"city-item ")]/div[@class="city_list_section"]/ul/li')
        for group in letter_groups[4:5]:
            # Provinces inside one letter group.
            provinces = group.xpath('.//div[@class="city_list"]/div[@class="city_province"]')
            for prov in provinces[0:1]:
                province = prov.xpath('./div[@class="city_list_tit c_b"]/text()').get().strip()
                for city in prov.xpath('./ul/li')[:1]:
                    city_name = city.xpath('./a/text()').extract_first()
                    city_url = urllib.parse.urljoin(
                        response.url, city.xpath('./a/@href').extract_first())

                    # On-sale second-hand houses of this city.
                    yield scrapy.Request(
                        url=city_url + '/ershoufang/', callback=self.get_sell_area_url,
                        meta={'province': province, 'city_name': city_name}
                    )
                    # Sold (deal) second-hand houses of this city.
                    yield scrapy.Request(
                        url=city_url + '/chengjiao/', callback=self.get_deal_area_url,
                        meta={'province': province, 'city_name': city_name}
                    )

    # ------------------------------------------------------------------
    # on-sale pipeline
    # ------------------------------------------------------------------

    def get_sell_area_url(self, response):
        """Yield one request per district of a city's on-sale listing."""
        base_meta = {'province': response.meta['province'],
                     'city_name': response.meta['city_name']}
        for link in response.xpath('//div[@data-role="ershoufang"]/div/a'):
            area_name = link.xpath('./text()').extract_first()
            area_href = urllib.parse.urljoin(
                response.url, link.xpath('./@href').extract_first())
            yield scrapy.Request(
                url=area_href, callback=self.get_sell_shop_url,
                meta=dict(base_meta, area_name=area_name)
            )

    def get_sell_shop_url(self, response):
        """Yield one request per town (second filter row) of a district."""
        base_meta = {k: response.meta[k] for k in ('province', 'city_name', 'area_name')}
        for link in response.xpath('//div[@data-role="ershoufang"]/div[2]/a'):
            shop_name = link.xpath('./text()').extract_first()
            shop_href = urllib.parse.urljoin(
                response.url, link.xpath('./@href').extract_first())
            yield scrapy.Request(
                url=shop_href, callback=self.sell_next_url,
                meta=dict(base_meta, shop_name=shop_name)
            )

    def sell_next_url(self, response):
        """Paginate a town's on-sale listing: one request per page."""
        page_data = response.xpath('//div[@class="page-box fr"]/div/@page-data').extract_first()
        if page_data is None:  # no pagination widget -> nothing to crawl here
            return
        # page-data is a JSON-ish attribute such as {"totalPage":42,...}.
        total_pages = int(re.findall(r'"totalPage":(\d+)', page_data)[0])
        base_meta = {k: response.meta[k]
                     for k in ('province', 'city_name', 'area_name', 'shop_name')}
        for page in range(1, total_pages + 1):
            # ke.com paginates by appending pgN/ to the filter URL.
            yield scrapy.Request(
                url=response.url + f'pg{page}/', callback=self.get_sell_detail_url,
                meta=dict(base_meta)
            )

    def get_sell_detail_url(self, response):
        """Yield one detail-page request per house on an on-sale listing page."""
        base_meta = {k: response.meta[k]
                     for k in ('province', 'city_name', 'area_name', 'shop_name')}
        for li in response.css('ul.sellListContent li.clear'):
            detail_url = li.css('a.img::attr(href)').extract_first()
            yield scrapy.Request(
                url=detail_url, callback=self.get_sell_detail_data, meta=dict(base_meta)
            )

    def get_sell_detail_data(self, response):
        """Parse one on-sale house detail page into a ``SellRoomDataItem``.

        The item is handed to the broker callback via meta and yielded there;
        when the page has no broker link, a placeholder broker is emitted
        instead (same convention as the deal pipeline) so the house item is
        not lost.
        """
        sell_item = SellRoomDataItem()
        sell_item['sell_id'] = re.findall(r'\d+', response.url)[0]  # house id from the URL
        sell_item['province'] = response.meta['province']
        sell_item['city_name'] = response.meta['city_name']
        sell_item['area_name'] = response.meta['area_name']
        sell_item['shop_name'] = response.meta['shop_name']

        sell_item['title'] = response.css('div.title h1::attr(title)').extract_first()
        sell_item['total_price'] = response.css('span.total::text').extract_first()          # total price (10k CNY)
        sell_item['unit_price'] = response.css('span.unitPriceValue::text').extract_first()  # price per sqm (CNY)
        sell_item['community'] = response.xpath(
            '//div[@class="communityName"]/a[1]/text()').extract_first()                     # community name

        # "Basic attributes" block: a flat text list of alternating
        # label / value entries; map each known label to its item field.
        texts = response.xpath(
            '//div[@class="introContent"]/div[@class="base"]/div[@class="content"]/ul/li//text()').extract()
        for field in self._SELL_BASE_FIELDS.values():
            sell_item[field] = None  # default for labels absent from this page
        for index, label in enumerate(texts):
            field = self._SELL_BASE_FIELDS.get(label)
            # Bounds check: a trailing label with no value must not raise.
            if field is not None and index + 1 < len(texts):
                sell_item[field] = texts[index + 1]
        # Strip the trailing ㎡ from the areas; inner area may be '暂无数据'.
        if sell_item['bullde_area'] is not None:
            sell_item['bullde_area'] = re.findall(r'(.*?)㎡', sell_item['bullde_area'])[0]
        if sell_item['set_area'] is not None and sell_item['set_area'] != '暂无数据':
            sell_item['set_area'] = re.findall(r'(.*?)㎡', sell_item['set_area'])[0]

        # "Transaction attributes" block: fixed <li> positions.
        trans = response.xpath('//div[@class="transaction"]/div[@class="content"]/ul')
        sell_item['listed_time'] = self._text(trans.xpath('./li[1]/text()'))    # listing date
        sell_item['ownership'] = self._text(trans.xpath('./li[2]/text()'))      # trade ownership
        sell_item['last_time'] = self._text(trans.xpath('./li[3]/text()'))      # last transaction date
        sell_item['house_use'] = self._text(trans.xpath('./li[4]/text()'))      # house usage
        sell_item['house_years'] = self._text(trans.xpath('./li[5]/text()'))    # ownership years
        sell_item['property_belo'] = self._text(trans.xpath('./li[6]/text()'))  # property attribution
        sell_item['mortgage_info'] = self._text(trans.xpath('./li[7]/span[2]/text()'))  # mortgage info

        sell_item['house_img'] = response.css('ul.smallpic li::attr(data-src)').extract_first()

        # Broker page; guard against a missing link (previously a None URL
        # raised inside scrapy.Request and the whole item was dropped).
        broker_url = response.css('div.ke-agent-sj-fr .ke-agent-sj-info a::attr(href)').extract_first()
        if broker_url:
            yield scrapy.Request(
                url=broker_url, callback=self.get_sell_broker_data, meta={'sell_item': sell_item}
            )
        else:
            broker_id, placeholder = self._placeholder_broker()
            yield placeholder
            sell_item['broker_id'] = broker_id
            yield sell_item

    def get_sell_broker_data(self, response):
        """Yield the broker item, then the on-sale house item linked to it."""
        broker_id, broker_item = self._parse_broker(response)
        yield broker_item

        sell_item = response.meta['sell_item']
        sell_item['broker_id'] = broker_id
        yield sell_item

    # ------------------------------------------------------------------
    # deal (sold) pipeline
    # ------------------------------------------------------------------

    def get_deal_area_url(self, response):
        """Yield one request per district of a city's deal listing."""
        base_meta = {'province': response.meta['province'],
                     'city_name': response.meta['city_name']}
        for link in response.xpath('//div[@data-role="ershoufang"]/div/a'):
            area_name = link.xpath('./text()').extract_first()
            area_href = urllib.parse.urljoin(
                response.url, link.xpath('./@href').extract_first())
            yield scrapy.Request(
                url=area_href, callback=self.get_deal_shop_url,
                meta=dict(base_meta, area_name=area_name)
            )

    def get_deal_shop_url(self, response):
        """Yield one request per town of a district's deal listing."""
        base_meta = {k: response.meta[k] for k in ('province', 'city_name', 'area_name')}
        for link in response.xpath('//div[@data-role="ershoufang"]/div[2]/a'):
            shop_name = link.xpath('./text()').extract_first()
            shop_href = urllib.parse.urljoin(
                response.url, link.xpath('./@href').extract_first())
            yield scrapy.Request(
                url=shop_href, callback=self.deal_next_url,
                meta=dict(base_meta, shop_name=shop_name)
            )

    def deal_next_url(self, response):
        """Paginate a town's deal listing: one request per page."""
        page_data = response.xpath('//div[@class="page-box fr"]/div/@page-data').extract_first()
        if page_data is None:
            return
        total_pages = int(re.findall(r'"totalPage":(\d+)', page_data)[0])
        base_meta = {k: response.meta[k]
                     for k in ('province', 'city_name', 'area_name', 'shop_name')}
        for page in range(1, total_pages + 1):
            yield scrapy.Request(
                url=response.url + f'pg{page}/', callback=self.get_deal_detail_url,
                meta=dict(base_meta)
            )

    def get_deal_detail_url(self, response):
        """Yield one detail-page request per house on a deal listing page."""
        base_meta = {k: response.meta[k]
                     for k in ('province', 'city_name', 'area_name', 'shop_name')}
        for li in response.css('ul.listContent li'):
            detail_url = li.css('div.info div.title a::attr(href)').extract_first()
            yield scrapy.Request(
                url=detail_url, callback=self.get_deal_detail_data, meta=dict(base_meta)
            )

    def get_deal_detail_data(self, response):
        """Parse one sold-house detail page into a ``DealRoomDataItem``.

        The item is passed via meta to the broker-lookup JSON endpoint, which
        yields it once the broker id is known.
        """
        deal_id = re.findall(r'\d+', response.url)[0]  # house id from the URL

        title = self._text(response.css('h1.main::text'))
        deal_time = response.css('h1.main strong::text').extract_first()  # e.g. '2021.01.01 成交'
        if deal_time is not None:
            # Character class: removes every 成 / 交 character, keeping the date.
            deal_time = re.sub(r'[成交]', '', deal_time)

        deal_price = response.css('span.dealTotalPrice i::text').get()   # deal price (10k CNY)
        unit_pirce = response.css('div.price b::text').extract_first()   # price per sqm (field name kept as-is)
        msg = response.xpath('//div[@class="msg"]')
        listed_price = self._text(msg.xpath('./span[1]/label/text()'))   # listing price
        deal_day = self._text(msg.xpath('./span[2]/label/text()'))       # days on market
        adprice_num = self._text(msg.xpath('./span[3]/label/text()'))    # price adjustments
        carrylook_num = self._text(msg.xpath('./span[4]/label/text()'))  # viewings
        pay_numpeople = self._text(msg.xpath('./span[5]/label/text()'))  # followers
        browse_num = self._text(msg.xpath('./span[6]/label/text()'))     # page views

        # Basic attributes (fixed <li> positions).
        base = response.css('div.base .content ul')
        house_type = self._text(base.css('li:nth-child(1)::text'))        # layout
        floor = self._text(base.css('li:nth-child(2)::text'))             # floor
        bullde_area = self._text(base.css('li:nth-child(3)::text'))       # built area
        house_structure = self._text(base.css('li:nth-child(4)::text'))   # layout structure
        Inside_area = self._text(base.css('li:nth-child(5)::text'))       # inner area
        bullde_type = self._text(base.css('li:nth-child(6)::text'))       # building type
        house_face = self._text(base.css('li:nth-child(7)::text'))        # orientation
        built_year = self._text(base.css('li:nth-child(8)::text'))        # build year
        decoration = self._text(base.css('li:nth-child(9)::text'))        # decoration
        bullde_structure = self._text(base.css('li:nth-child(10)::text')) # building structure
        Heating_method = self._text(base.css('li:nth-child(11)::text'))   # heating
        Echelon_pro = self._text(base.css('li:nth-child(12)::text'))      # elevator/household ratio
        elevator = self._text(base.css('li:nth-child(13)::text'))         # has elevator

        # Transaction attributes.
        trans = response.css('div.transaction .content ul')
        LJ_num = self._text(trans.xpath('./li[1]/text()'))        # Lianjia listing number
        ownership = self._text(trans.xpath('./li[2]/text()'))     # trade ownership
        listed_time = self._text(trans.xpath('./li[3]/text()'))   # listing date
        house_use = self._text(trans.xpath('./li[4]/text()'))     # house usage
        house_years = self._text(trans.xpath('./li[5]/text()'))   # ownership years
        property_belo = self._text(trans.xpath('./li[6]/text()')) # property attribution

        house_img = response.css('div.thumbnail ul li::attr(data-src)').extract_first()
        deal_item = DealRoomDataItem(
            deal_id=deal_id, province=response.meta['province'],
            city_name=response.meta['city_name'], area_name=response.meta['area_name'],
            shop_name=response.meta['shop_name'],
            title=title, deal_time=deal_time, deal_price=deal_price, unit_pirce=unit_pirce,
            listed_price=listed_price, deal_day=deal_day, adprice_num=adprice_num,
            carrylook_num=carrylook_num, pay_numpeople=pay_numpeople, browse_num=browse_num,
            house_type=house_type, floor=floor, bullde_area=bullde_area,
            house_structure=house_structure, Inside_area=Inside_area, bullde_type=bullde_type,
            house_face=house_face, built_year=built_year, decoration=decoration,
            bullde_structure=bullde_structure, Heating_method=Heating_method,
            Echelon_pro=Echelon_pro, elevator=elevator,
            LJ_num=LJ_num, ownership=ownership, listed_time=listed_time, house_use=house_use,
            house_years=house_years, property_belo=property_belo, house_img=house_img
        )

        # JSON lookup endpoint that resolves the broker page for this deal.
        broker_url = '/'.join(response.url.split('/')[:-1]) + f'/display?hid={deal_id}'
        yield scrapy.Request(
            url=broker_url, callback=self.deal_broker_detail, meta={'deal_item': deal_item}
        )

    def deal_broker_detail(self, response):
        """Resolve the broker URL from the JSON lookup response.

        Any of an empty body, a missing/None ``data`` field, or a missing
        ``agentUrl`` falls back to a placeholder broker so the deal item is
        still emitted (previously an empty result silently dropped it).
        """
        deal_item = response.meta['deal_item']
        result = json.loads(response.text)
        data = result.get('data') if result else None
        broker_url = data.get('agentUrl') if data is not None else None
        if broker_url:
            yield scrapy.Request(
                url=broker_url, callback=self.get_deal_broker_data, meta={'deal_item': deal_item}
            )
        else:
            broker_id, placeholder = self._placeholder_broker()
            yield placeholder
            deal_item['broker_id'] = broker_id
            yield deal_item

    def get_deal_broker_data(self, response):
        """Yield the broker item, then the deal item linked to it."""
        broker_id, broker_item = self._parse_broker(response)
        yield broker_item

        deal_item = response.meta['deal_item']
        deal_item['broker_id'] = broker_id
        yield deal_item