import json
import re,time
import random
import scrapy
import urllib.parse
from scrapy_redis.spiders import RedisSpider
from conch_findroom.items import NewDataItem,BorderItem

class XfSpiderSpider(RedisSpider):
    """Distributed (scrapy-redis) spider for new-home listings on ke.com.

    Crawl chain:
        national city index -> city new-home portal -> district ->
        paginated list -> property detail -> building/planning info ->
        broker recommendation API -> broker shop page.

    Yields:
        NewDataItem -- one per property, enriched step by step via ``meta``.
        BorderItem  -- one per broker (or an empty placeholder when the
                       recommendation API returns no agent). The broker row
                       is always emitted before the property row because the
                       property table references the broker key.
    """
    name = 'xf_spider'
    allowed_domains = ['ke.com', 'fang.ke.com', 'dianpu.ke.com']
    # start_urls = ['https://www.ke.com/city/']
    redis_key = "xf_spider:start_urls"

    @staticmethod
    def _clean(value):
        """Strip surrounding whitespace, tolerating None.

        ``extract_first()``/``get()`` return None whenever a node is missing;
        calling ``.strip()`` on that directly raises AttributeError and kills
        the whole callback, losing every other field scraped from the page.
        """
        return value.strip() if value is not None else None

    def parse(self, response):
        """Parse the national city index and request each city's portal.

        The hard-coded slices below keep the original crawl scope
        (Guangdong province, first city only).
        """
        print('开启新房爬虫项目数据采集'.center(100, '+'))
        # Letter-grouped sections of the city index page
        lis = response.xpath('//div[contains(@class,"city-item ")]/div[@class="city_list_section"]/ul/li')
        # Guangdong
        for li in lis[4:5]:
            # Provinces listed under this letter
            divs = li.xpath('.//div[@class="city_list"]/div[@class="city_province"]')
            for div in divs[0:1]:
                # Province name (None-safe: the layout may change)
                province = self._clean(div.xpath('./div[@class="city_list_tit c_b"]/text()').get())
                citys = div.xpath('./ul/li')  # cities of this province
                for city in citys[:1]:
                    city_name = city.xpath('./a/text()').extract_first()  # city name
                    city_url = city.xpath('./a/@href').extract_first()  # city link
                    if not city_url:
                        continue  # no href -> cannot derive the city sub-domain
                    # City abbreviation, e.g. 'gz' from '.../gz.ke.com'
                    city = city_url.split('/')[-1].split('.')[0]

                    # New-home portal of the city
                    new_url = f'https://{city}.fang.ke.com/loupan/'
                    yield scrapy.Request(
                        url=new_url, callback=self.get_area_url,
                        meta={'province': province, 'city_name': city_name}
                    )

    # Collect district links of one city
    def get_area_url(self, response):
        """Parse the district filter bar and request each district page."""
        province = response.meta['province']  # province name
        city_name = response.meta['city_name']  # city name
        lis = response.xpath('//ul[@class="district-wrapper"]/li')
        for li in lis:
            area_name = li.xpath('./text()').extract_first()  # district name
            area_pinyin = li.xpath('./@data-district-spell').extract_first()  # district slug
            if not area_pinyin:
                # Without the slug, urljoin would just re-yield response.url
                # (a duplicate request) -- skip the entry instead.
                continue
            area_url = urllib.parse.urljoin(response.url, area_pinyin)  # full district URL

            yield scrapy.Request(
                url=area_url, callback=self.get_next_url,
                meta={'province': province, 'city_name': city_name, 'area_name': area_name}
            )

    # Determine how many list pages the district has
    def get_next_url(self, response):
        """Read the pagination control and fan out one request per page."""
        province = response.meta['province']  # province name
        city_name = response.meta['city_name']  # city name
        area_name = response.meta['area_name']  # district name

        # Text of the last pagination anchor is the max page number.
        last_num = response.xpath('//section[@class="se-part"][2]/div/a[last()]/text()').extract_first()
        # Fall back to a single page when the pager is absent (districts with
        # few listings render no pagination at all) or its text is not a
        # number -- previously such districts were skipped entirely.
        if last_num and last_num.strip().isdigit():
            pages = int(last_num)
        else:
            pages = 1
        # rstrip avoids building '...//pg1/' when the URL ends with '/'
        base_url = response.url.rstrip('/')
        for page in range(1, pages + 1):
            next_url = f'{base_url}/pg{page}/'
            yield scrapy.Request(
                url=next_url, callback=self.get_detail_url,
                meta={'province': province, 'city_name': city_name, 'area_name': area_name}
            )

    # Collect detail-page links from one list page
    def get_detail_url(self, response):
        """Parse a paginated listing page and request each property detail."""
        province = response.meta['province']  # province name
        city_name = response.meta['city_name']  # city name
        area_name = response.meta['area_name']  # district name

        lis = response.xpath('//ul[@class="resblock-list-wrapper"]/li')
        for li in lis:
            title = li.xpath('.//div[@class="resblock-name"]/a/@title').extract_first()  # listing title
            detail_url = li.xpath('.//div[@class="resblock-name"]/a/@href').get()  # detail link
            if not detail_url:
                continue  # card without a link -- nothing to follow
            detail_url = urllib.parse.urljoin(response.url, detail_url)  # absolute URL
            # Shop/estate name: second '/'-separated token of the location
            # line. Guard both a missing node and a line without '/'.
            location = li.xpath('./div/a[@class="resblock-location"]/text()[2]').extract_first()
            location_parts = location.strip().split('/') if location else []
            shop_name = location_parts[1] if len(location_parts) > 1 else None
            # Room layout, joined like '2居/3居'
            house_type = li.xpath(
                './/div[@class="resblock-desc-wrapper"]/a[2]/span[not(contains(@class,"area"))]/text()').extract()
            house_type = '/'.join(house_type).strip('户型：/')

            yield scrapy.Request(
                url=detail_url, callback=self.get_detail_data,
                meta={'province': province, 'city_name': city_name, 'area_name': area_name, 'shop_name': shop_name,
                      'house_type': house_type}
            )

    # Scrape the property detail page
    def get_detail_data(self, response):
        """Build a NewDataItem from the detail page, then follow the
        building-info link to fill in planning/supporting fields."""
        house_unique = response.url.split('/')[-2]  # unique property key taken from the URL
        province = response.meta['province']  # province name
        city_name = response.meta['city_name']  # city name
        area_name = response.meta['area_name']  # district name
        shop_name = response.meta['shop_name']  # shop/estate name
        house_type = response.meta['house_type']  # room layout

        title = response.css('h2.DATA-PROJECT-NAME::text').extract_first()  # title
        # contact_mode = response.css('div.share span::text').extract_first()  # contact info (removed from the page)
        # Alias: drop whitespace and the '别名：' label characters
        alias = response.css('div.other-name::text').extract_first()
        alias = re.sub(r'[\n\t\s别名：]', '', alias) if alias is not None else None
        # Unit price
        unit_price = response.xpath(
            '//div[@class="top-info "]/div[@class="price"]/span[@class="price-number"][1]/text()').get()
        # Total price
        total_price = response.xpath(
            '//div[@class="top-info "]/div[@class="price"]/span[@class="price-number"][2]/text()').get()
        adress = response.xpath('//ul[@class="info-list"]/li[1]/span[@class="content"]/text()').get()  # address
        open_time = response.css('div.open-date .content::text').extract_first()  # opening date
        # First carousel image
        house_img = response.xpath(
            '//div[contains(@class,"album")]/div[@class="carousel"]/ul/li[1]/img/@data-img').get()
        item = NewDataItem(
            house_unique=house_unique, province=province, city_name=city_name, area_name=area_name, shop_name=shop_name,
            house_type=house_type, title=title, alias=alias, unit_price=unit_price,
            total_price=total_price, adress=adress, open_time=open_time, house_img=house_img
        )  # contact_mode dropped: no longer present on the page
        hdiccityid = response.xpath('//ul[@id="sugBox"]/@data-cityid').extract_first()  # city id used by the broker API

        # Building/planning info link; urljoin falls back to response.url
        # when the href is missing (falsy second argument returns the base).
        href_url = response.css('div.top-info  .more-building a::attr(href)').extract_first()
        href_url = urllib.parse.urljoin(response.url, href_url)
        yield scrapy.Request(
            url=href_url, callback=self.get_final_data, meta={'item': item, 'hdiccityid': hdiccityid}
        )

    # Scrape planning / supporting info, then look up the broker
    def get_final_data(self, response):
        """Fill the remaining NewDataItem fields from the building-info page
        and request the broker recommendation API."""
        item = response.meta['item']
        hdiccityid = response.meta['hdiccityid']  # city id for the broker API
        clean = self._clean  # None-safe strip (missing rows no longer abort the callback)
        # Basic information
        li1 = response.xpath('//ul[@class="x-box"][1]')
        item['proper_type'] = li1.xpath('./li[1]/span[2]/text()').extract_first()  # property type
        item['house_feature'] = li1.xpath('./li[3]/span[2]/text()').extract_first()  # project features
        item['sales_office'] = li1.xpath('./li[6]/span[2]/text()').extract_first()  # sales office address
        item['developer'] = li1.xpath('./li[7]/span[2]/text()').extract_first()  # developer
        # Planning information
        li2 = response.xpath('//ul[@class="x-box"][2]')
        item['builde_type'] = li2.xpath('./li[1]/span[2]/text()').extract_first()  # building type
        item['green_rate'] = clean(li2.xpath('./li[2]/span[2]/text()').extract_first())  # greening rate
        item['land_area'] = clean(li2.xpath('./li[3]/span[2]/text()').extract_first())  # land area
        item['volume_rate'] = clean(li2.xpath('./li[4]/span[2]/text()').extract_first())  # plot ratio
        item['builde_area'] = clean(li2.xpath('./li[5]/span[2]/text()').extract_first())  # floor area
        item['house_plan'] = clean(li2.xpath('./li[7]/span[2]/text()').extract_first())  # planned households
        item['property_years'] = clean(li2.xpath('./li[8]/span[2]/text()').extract_first())  # tenure years
        item['handover_time'] = clean(li2.xpath('./li[10]/span[2]/text()').extract_first())  # handover date
        # Supporting facilities
        li3 = response.xpath('//ul[@class="x-box"][3]')
        item['property_company'] = li3.xpath('./li[1]/span[2]/text()').extract_first()  # property company
        item['carbit_ratio'] = li3.xpath('./li[2]/span[2]/text()').extract_first()  # parking ratio
        item['property_price'] = li3.xpath('./li[3]/span[2]/text()').extract_first()  # property fee
        item['Heating_method'] = li3.xpath('./li[4]/span[2]/text()').extract_first()  # heating method
        item['water'] = li3.xpath('./li[5]/span[2]/text()').extract_first()  # water supply
        item['electric'] = li3.xpath('./li[6]/span[2]/text()').extract_first()  # power supply
        # Parking spot count: collapse internal whitespace, tolerate absence
        carbit_num = li3.xpath('./li[7]/span[2]/text()').extract_first()
        item['carbit_num'] = re.sub(r'[\n\t\s]', '', carbit_num) if carbit_num is not None else None

        # Ask the recommendation API for a broker attached to this project
        yield scrapy.Request(
            url=f'https://ex.ke.com/sdk/recommend/html/100000352?hdicCityId={hdiccityid}&pageScene=im&positionId=9&id=100000352&mediumId=100000036&projectName={item["house_unique"]}&projectType=&required400=true',
            callback=self.get_broker_id, meta={'item': item}
        )

    # Resolve the broker id from the recommendation API response
    def get_broker_id(self, response):
        """Follow the broker's shop page when an agent exists; otherwise emit
        a placeholder broker row plus the property row immediately."""
        item = response.meta['item']
        results = json.loads(response.text)  # API returns JSON
        # Guard against a null/absent 'data' payload as well as an empty list
        agent_list = (results.get('data') or {}).get('agentList')
        if agent_list:
            broker_id = agent_list[0]['ucId']
            yield scrapy.Request(
                url=f'https://dianpu.ke.com/{broker_id}', callback=self.get_broker_data, meta={'item': item}
            )
        else:
            # broker_id = random.randint(100000000000, 999999999999)  # old scheme: random 12-digit number
            broker_id = int(time.time() * 100)  # 12-digit timestamp used as a synthetic key
            brokeritem = BorderItem(
                broker_id=broker_id, broker_name='', contact='', shop_group='',
                service_years='', personal_score='', broker_img=''
            )
            yield brokeritem

            item['broker_id'] = broker_id
            yield item

    # Scrape broker info (note: the broker row -- the referenced table -- is
    # stored before the property row that points at it)
    def get_broker_data(self, response):
        """Yield the broker item, then the property item referencing it."""
        broker_id = re.findall(r'\d+', response.url)[0]  # broker id: numeric path segment of the shop URL
        broker_name = response.css('span.agent-name::text').extract_first()  # broker name
        contact = response.css('span.agent-tel::text').extract_first()  # phone number
        shop_group = response.css('span.map-text::text').extract_first()  # shop / team
        service_years = response.css('ul.info-list li:nth-child(1) span:nth-child(2)::text').get()  # years of service
        personal_score = response.css('ul.info-list li:nth-child(2) span:nth-child(2)::text').get()  # personal score
        # Broker avatar
        broker_img = response.css('div.left-part img::attr(src)').extract_first()

        brokeritem = BorderItem(
            broker_id=broker_id, broker_name=broker_name, contact=contact, shop_group=shop_group,
            service_years=service_years, personal_score=personal_score, broker_img=broker_img
        )
        yield brokeritem

        item = response.meta['item']
        item['broker_id'] = broker_id
        yield item
