# -*- coding: utf-8 -*-
# @Time    : 2021/9/23 9:44
# @Author  : ZSQ
# @Email   : zsq199170918@163.com
# @FileName: beike.py
# @Software: PyCharm

import scrapy
import re
import logging
from copy import copy, deepcopy
from HifoXf.items import ProjectItem
import urllib.parse
import json

logger = logging.getLogger(__name__)


class BeikeSpider(scrapy.Spider):
    """Spider for new-home ("xinfang") project listings on ke.com (Beike).

    Crawl flow:
        1. ``parse``: national city index page -> one listing start URL per
           selected city.
        2. ``parse_project_li``: paginated project list pages; when a city
           exceeds the site's 2000-result cap the crawl is re-issued per
           district.
        3. ``parse_project_detail``: per-project detail page -> ProjectItem.

    City selection comes either from spider arguments
    (``-a crawl=...`` / ``-a filter=...``) or from the CRAWL_CITIES /
    FILTER_CITIES project settings.
    """
    name = 'beike'
    allowed_domains = ['ke.com']
    start_urls = ['https://www.ke.com/city/']

    custom_settings = {
        'CONCURRENT_REQUESTS': 6,
        'COOKIES_ENABLED': False,
        # When True, project ids are de-duplicated through redis
        # (see parse_project_li); presumably wired up by a project extension
        # that also sets self.redis_conn / self.filter_flag — TODO confirm.
        'CASE_ID_FILTER': True,
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        :param crawl: city names to crawl (comma-separated str or list)
        :param filter: city names to exclude (comma-separated str or list)

        NOTE: ``filter`` shadows the builtin, but the name is part of the
        command-line interface (``-a filter=...``) and must not change.
        """
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """
        Pre-process the national city index page: validate configuration,
        then emit one listing-page request per selected city.

        :param response: response for https://www.ke.com/city/
        :return: generator of Requests for parse_project_li
        """
        try:
            # Dedup sanity check: ``filter_flag`` is expected to be set
            # elsewhere (extension/middleware) and is False when the case-id
            # dedup store failed to initialise.  Keep the loose ``!= False``
            # comparison: the attribute may hold non-bool sentinels.
            assert self.filter_flag != False, f'>>>>{self.name}:案例id去重配置失败<<<<'
            # Resolve which cities to crawl/skip (list_type: 1=crawl, 2=filter).
            cities_li, list_type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # Dedup check failed -> stop the whole spider.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Parameter validation failed -> stop the whole spider.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            if not cities_li:
                logger.error('贝壳新房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '贝壳新房城市url列表匹配为空')
                return
            headers = {'Referer': 'https://www.ke.com/'}
            # Provinces (and municipalities) listed under the domestic tab.
            province_div_li = response.xpath("//div[contains(@data-action,'国内')]//div[@class='city_province']")
            for province_div in province_div_li:
                province_name = province_div.xpath("./div/text()").extract_first().strip()
                # Cities grouped under this province.
                for city_data in province_div.xpath("./ul/li"):
                    city_name = city_data.xpath("./a/text()").extract_first()
                    city_url = city_data.xpath("./a/@href").extract_first()
                    city_url_f = self.format_crawl_city_url(city_url)
                    if not city_url_f:
                        logger.error('{}-{} 新房url构造出错，url：{}'.format(province_name, city_name, city_url))
                        continue
                    if list_type == 1:
                        # Crawl list: take the city when explicitly named or
                        # when the whole country was requested.
                        wanted = ('全国' in cities_li) or (city_name in cities_li)
                        if wanted:
                            logger.info((province_name, city_name, city_url_f))
                    else:
                        # Filter list: '全国' excludes everything, so skip the
                        # remaining cities of this province outright.
                        if '全国' in cities_li:
                            break
                        wanted = city_name not in cities_li
                    if wanted:
                        # Fetch page 1 of the city-wide (district-agnostic)
                        # project list.  Strings are immutable, so no copies
                        # are needed in meta.
                        yield response.follow(
                            city_url_f + '/pg1/',
                            headers=headers,
                            callback=self.parse_project_li,
                            meta=dict(province_name=province_name, city_name=city_name,
                                      city_url=city_url_f, page_num=1, ),
                            dont_filter=True,
                        )

    def parse_project_li(self, response):
        """
        Parse one page of the project list: emit a detail request per project,
        follow pagination, and fall back to per-district crawling when the
        result count exceeds the site's 2000-item cap.

        :param response: a /pgN/ listing page
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        city_url = response.meta['city_url']
        page_num = response.meta['page_num']
        district_name = response.meta.get('district_name', None)
        # Total number of projects reported by the page; raises (and is
        # logged by Scrapy) if the counter element is missing.
        total_num = int(
            response.xpath("//div[@class='resblock-have-find']/span[@class='value']/text()").extract_first())
        if total_num == 0:
            if district_name:
                logger.warning('{}-{}-{} 新房项目总数为0'.format(province_name, city_name, district_name))
            else:
                logger.warning('{}-{} 新房项目总数为0'.format(province_name, city_name))
        elif 0 < total_num <= 2000:
            # A trailing "guess you like" section, when present, marks the end
            # of the real results — only take list items before it.
            guess_u_like = response.xpath("//li[contains(text(),'猜你喜欢')]")
            if guess_u_like:
                project_li = response.xpath("//li[contains(text(),'猜你喜欢')]/preceding-sibling::li")
            else:
                project_li = response.xpath("//ul[@class='resblock-list-wrapper']/li")
            if project_li:
                for project_obj in project_li:
                    item_pt = ProjectItem()
                    item_pt['provinceName'] = province_name  # province
                    item_pt['cityName'] = city_name  # city
                    item_pt['projectName'] = project_obj.xpath(
                        ".//div[@class='resblock-name']/a/text()").extract_first()  # project name
                    project_id_str = project_obj.xpath(
                        ".//a[@class='resblock-location']/@data-strategy-info").extract_first()
                    item_pt['projectId'] = self.get_project_id(project_id_str)  # project id
                    item_pt['projectUrl'] = project_obj.xpath(
                        ".//div[@class='resblock-name']/a/@href").extract_first()  # project url
                    item_pt['projectUrl'] = urllib.parse.urljoin(response.request.url, item_pt['projectUrl'])
                    item_pt['projectStatus'] = project_obj.xpath(
                        ".//div/span[@class='resblock-type']/text()").extract_first()  # sales status
                    item_pt['propertyType'] = project_obj.xpath(
                        ".//div[@class='resblock-name']/span[2]/text()").extract_first()  # property type
                    item_pt['projectAddress'] = project_obj.xpath(
                        ".//a[@class='resblock-location']/text()").extract()  # address
                    item_pt['mainStructure'] = project_obj.xpath(
                        ".//a[@class='resblock-room']/*[not(@class='area')]/text()").extract()  # floor plans
                    item_pt['buildingArea'] = project_obj.xpath(
                        ".//a[@class='resblock-room']/*[@class='area']/text()").extract()  # floor-plan area
                    item_pt['avgPrice'] = project_obj.xpath(
                        ".//div[@class='resblock-price']/div[@class='main-price']/span/text()").extract()  # avg price
                    item_pt['totalPrice'] = project_obj.xpath(
                        ".//div[@class='resblock-price']/div[@class='second']/text()").extract_first()  # total price
                    item_pt['tags'] = project_obj.xpath(
                        ".//div[@class='resblock-tag']/span/text()").extract()  # tags / highlights
                    project_name_spell = project_obj.xpath('./@data-project-name').extract_first()
                    if item_pt['projectUrl'] and item_pt['projectId']:
                        # De-duplicate by project id unless the filter is
                        # disabled; sadd() returns 0 for an already-seen id.
                        if (not self.settings['CASE_ID_FILTER']) or self.redis_conn.sadd(
                                'beike_xf_pid', item_pt['projectId']):
                            headers = {'Referer': item_pt['projectUrl']}
                            yield scrapy.Request(
                                item_pt['projectUrl'] + 'xiangqing/',
                                headers=headers,
                                callback=self.parse_project_detail,
                                # deepcopy: item_pt is rebuilt per loop
                                # iteration; the request must own a snapshot.
                                meta=dict(item_pt=deepcopy(item_pt), ),
                            )
                    else:
                        logger.error('{}-{}-{}-{} 项目url/id提取为空'.format(item_pt['provinceName'], item_pt['cityName'],
                                                                       item_pt['projectName'], project_name_spell))

                # Next page of the same (city or city+district) listing.
                next_page_num = page_num + 1
                yield response.follow(
                    city_url + '/pg{}/'.format(next_page_num),
                    callback=self.parse_project_li,
                    meta=dict(province_name=province_name, city_name=city_name,
                              city_url=city_url, page_num=next_page_num,
                              district_name=district_name, ),
                    dont_filter=True,
                )
            else:
                if district_name:
                    logger.warning('{}-{}-{}-第{}页 新房项目列表提取为空'.format(province_name, city_name, district_name, page_num))
                else:
                    logger.warning('{}-{}-第{}页 新房项目列表提取为空'.format(province_name, city_name, page_num))
        # total_num > 2000: the site truncates results, so re-crawl district
        # by district (only from the city-wide first page, to avoid loops).
        else:
            if page_num == 1:
                if not district_name:
                    district_li = response.xpath("//ul[@class='district-wrapper']/li")
                    for district_obj in district_li:
                        district_name = district_obj.xpath('./text()').extract_first()
                        district_spell = district_obj.xpath('./@data-district-spell').extract_first()
                        # Skip the "surroundings" and "overseas" pseudo-districts.
                        if '周边' not in district_name and '海外' not in district_name:
                            city_district_url = city_url + f'/{district_spell}'
                            # First page of the district-scoped project list.
                            yield response.follow(
                                city_district_url + '/pg1/',
                                callback=self.parse_project_li,
                                meta=dict(province_name=province_name, city_name=city_name,
                                          city_url=city_district_url, page_num=1,
                                          district_name=district_name, ),
                                dont_filter=True,
                            )
                else:
                    # todo no single district observed above the 2000 cap yet  --2021/09/24
                    logger.error(
                        '{}-{}-{} 新房项目总数为{}（超过2000限制）'.format(province_name, city_name, district_name, total_num))

    # ProjectItem field -> key in the detail page's embedded ``data`` object.
    _DETAIL_FIELD_MAP = (
        ('projectSubName', 'resblock_alias'),  # alias
        ('districtName', 'district'),  # district/county
        ('priceDesc', 'price_desc'),  # price description
        ('locationArea', 'bizcircle_name'),  # business area
        ('buildingType', 'build_type'),  # building type
        ('propertyType', 'house_type'),  # property type
        ('planningTotalHouseholds', 'total_room'),  # planned households
        ('developerName', 'developer_company'),  # developer
        ('saleAddress', 'store_addr'),  # sales office address
        ('saleTel', 'store_tel'),  # sales phone
        ('greeningRate', 'virescence_rate'),  # greening rate
        ('floorAreaRatio', 'cubage_rate'),  # plot ratio
        ('coverageArea', 'site_area'),  # site area
        ('totalBuildingArea', 'total_area'),  # total building area
        ('propertyRight', 'properright'),  # property-right term
        ('handoverDate', 'hand_over_time'),  # handover date
        ('openingDate', 'open_date'),  # opening date
        ('parkingSpaceRatio', 'carRatio'),  # parking ratio
        ('propertyManagementFee', 'property_price'),  # management fee
        ('heatingSupply', 'warm_suply'),  # heating
        ('waterSupply', 'watersuply_kind'),  # water supply
        ('powerSupply', 'powersuply_kind'),  # power supply
        ('parkingSpacesNum', 'underground_car_num'),  # parking spaces
        ('lng', 'longitude'),  # longitude
        ('lat', 'latitude'),  # latitude
        ('decorateSituation', 'decoration'),  # decoration
    )

    def parse_project_detail(self, response):
        """
        Parse a project's /xiangqing/ detail page and yield one item per
        pre-sale permit (or a single item with permit fields set to None).

        :param response: the detail-page response
        """
        item_pt = response.meta['item_pt']
        data_dict = self.get_project_data(response)
        if not isinstance(data_dict, dict):
            logger.error(
                '{}-{}-{} 项目详情data信息提取出错'.format(item_pt['cityName'], item_pt['projectName'], response.request.url))
            return
        for item_key, data_key in self._DETAIL_FIELD_MAP:
            item_pt[item_key] = data_dict.get(data_key, None)
        item_pt['projectChronicle'] = self.deal_chronicle(response)  # project timeline
        # Pre-sale permits: one output item per permit.
        permit_list = data_dict.get("permit_list", None)
        if permit_list:
            for permit_dict in permit_list:
                # Yield a copy so later iterations cannot mutate an item a
                # deferred pipeline may still hold.
                item = deepcopy(item_pt)
                item['preSalePermit'] = permit_dict.get('permit_number', None)  # permit number
                item['certDate'] = permit_dict.get('permit_time', None)  # issue date
                item['preSaleBuilding'] = permit_dict.get('building_list', None)  # buildings covered
                yield item
        else:
            logger.warning(
                '{}-{}-{} 预售许可证列表为空'.format(item_pt['cityName'], item_pt['projectName'], item_pt['projectUrl']))
            item_pt['preSalePermit'] = None
            item_pt['certDate'] = None
            item_pt['preSaleBuilding'] = None
            yield item_pt

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        Resolve the configured city list, preferring spider arguments over
        project settings, and crawl lists over filter lists.

        :return: (city list, 1 for crawl-list / 2 for filter-list),
                 e.g. (['重庆', '成都', '武汉'], 1)
        """
        arg_crawl = getattr(self, 'CRAWL_CITIES', None)
        arg_filter = getattr(self, 'FILTER_CITIES', None)
        cfg_crawl = self.settings.get('CRAWL_CITIES', None)
        cfg_filter = self.settings.get('FILTER_CITIES', None)
        if arg_crawl or arg_filter:
            if arg_crawl:
                return self.check_params(arg_crawl), 1
            return self.check_params(arg_filter), 2
        if cfg_crawl or cfg_filter:
            if cfg_crawl:
                return self.check_params(cfg_crawl), 1
            return self.check_params(cfg_filter), 2
        # No configuration at all: crawl the whole country.
        return ['全国'], 1

    def check_params(self, params) -> list:
        """
        Validate and normalise the configured city parameter.

        :param params: comma/、/；-separated str, or a list of city names
        :return: list of city names, e.g. ['重庆', '成都', '武汉'];
                 closes the spider on invalid input
        """
        # Normalise Chinese separators to ',' before splitting.
        regex_sep = re.compile('，|、|；|;')
        # A valid city name is purely CJK characters.
        regex_cjk = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                try:
                    params = regex_sep.sub(',', params).split(',')
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_cjk.findall(param.strip())]
                    # Every piece must survive validation.
                    assert ret_li and len(ret_li) == len(params)
                except AssertionError:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl beike -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                try:
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_cjk.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except AssertionError:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    @staticmethod
    def format_crawl_city_url(city_url):
        """
        Normalise a city homepage URL into its new-home listing URL,
        e.g. https://cq.ke.com/ -> https://cq.fang.ke.com/loupan.

        :param city_url: raw href from the city index page
        :return: listing base URL, or None when the input is unusable
        """
        if not isinstance(city_url, str):
            return
        city_url_pre = city_url.split('.ke.com')[0]
        if not city_url_pre:
            return
        # Some cities already use a *.fang subdomain; don't double it.
        if city_url_pre.endswith('fang'):
            return city_url_pre + '.ke.com/loupan'
        return city_url_pre + '.fang.ke.com/loupan'

    @staticmethod
    def get_project_id(project_id_str):
        """
        Extract the project id from the list page's strategy-info JSON.

        :param project_id_str: e.g. '{"fb_query_id":"493797050727632896",...,"fb_item_id":"620435"}'
        :return: the fb_item_id value, or None on any parse failure
        """
        try:
            return json.loads(project_id_str)['fb_item_id']
        except (TypeError, ValueError, KeyError):
            # None input, malformed JSON, or missing key.
            return

    @staticmethod
    def get_project_data(response):
        """
        Extract the ``data = {...}`` JSON object embedded in the detail page.

        :param response: the detail-page response
        :return: dict, or None on any extraction/parse failure
        """
        regex = re.compile(r'data = ({.*})')
        try:
            data_str = regex.findall(response.body.decode())[0]
            data_dict = json.loads(data_str)
        except (UnicodeDecodeError, IndexError, ValueError):
            # Undecodable body, pattern not found, or malformed JSON.
            return
        return data_dict or None

    @staticmethod
    def deal_chronicle(response):
        """
        Extract the project-timeline ("fenqi") entries from the detail page.

        :param response: the detail-page response
        :return: list of text-fragment lists (timeline rows plus the trailing
                 fq-row block), or None on failure
        """
        chronicle_info = list()
        try:
            chronicle_li = response.xpath("//ul[@class='fenqi-ul']/li")
            fq_li = [fq for fq in response.xpath("//p[@class='fq-row']//text()").extract() if fq and fq.strip()]
            # The last two <li> are layout filler; collect text per entry.
            for chronicle in chronicle_li[0:-2]:
                chronicle_text = [chron for chron in chronicle.xpath(".//text()").extract() if chron and chron.strip()]
                chronicle_info.append(chronicle_text)
            chronicle_info.append(fq_li)
        except Exception:
            return
        return chronicle_info
