# -*- coding: utf-8 -*-
import scrapy

import logging
from copy import copy, deepcopy
import re
import json
import urllib.parse
import pandas as pd
from HifoFzz.items import OfficeItem, ShopsItem

logger = logging.getLogger(__name__)


class BeikeSpider(scrapy.Spider):
    """Spider for Beike (ke.com) non-residential listings: office buildings (写字楼) and shops (商铺), rent and sale."""
    name = 'beike'
    allowed_domains = ['ke.com']
    start_urls = ['https://www.ke.com/city/']
    xzl_base_url = 'https://shangye.ke.com/api/pc/ke/office/building/list'  # base for office-building list urls
    sp_base_url = 'https://shangye.ke.com/api/pc/ke/pu/list'  # base for shop list urls
    district_li_url = 'https://shangye.ke.com/api/pc/ke/geo/districtlist?cityCode={}'  # GET: districts of a city
    xzl_detail_url = 'https://shangye.ke.com/api/pc/ke/office/building/detail?id={}&page=1&size=1000&area=&delType={}&cityId={}'  # GET: office-building detail
    xzl_case_url = 'https://shangye.ke.com/api/pc/ke/office/house/detail?id={}&cityId={}'  # GET: office case detail
    sp_case_url = 'https://shangye.ke.com/api/pc/ke/pu/detail?id={}&cityId={}'  # GET: shop case detail

    custom_settings = {
        'SCHEDULER_QUEUE_CLASS': 'scrapy_redis.queue.FifoQueue',
        'CONCURRENT_REQUESTS': 7,
        'CASE_ID_FILTER': True,  # whether to deduplicate cases by id (via a redis set, see parse_xzl_detail/parse_sp_li)
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """Store the crawl/filter city strings passed as spider arguments (-a crawl=... / -a filter=...)."""
        super().__init__(*args, **kwargs)
        self.FILTER_CITIES = filter
        self.CRAWL_CITIES = crawl

    def start_requests(self):
        """
        Validate the spider arguments and issue the initial city-index request.
        :return: yields the single seed request
        """
        cities, mode = self.get_crawl_or_filter_cities()
        seed = scrapy.Request(
            self.start_urls[0],
            dont_filter=True,
            meta={'cities_li': cities, 'type': mode},
        )
        yield seed

    def parse(self, response, **kwargs):
        """
        Pre-process the city index and follow each selected city's homepage.

        meta['type'] == 1 treats cities_li as an include list, == 2 as an
        exclude list; '全国' selects everything (mode 1) or nothing (mode 2).
        :param response: city index page
        :param kwargs: unused
        :return: yields follow requests to parse_non_residential_url
        """
        cities = copy(response.meta['cities_li'])
        mode = copy(response.meta['type'])
        if not cities:
            logger.error('贝壳非住宅城市url列表匹配为空')
            self.crawler.engine.close_spider(self, '贝壳非住宅城市url列表匹配为空')
            return
        headers = {'Referer': 'https://www.ke.com/'}
        # Province (or municipality) groups inside the domestic-city section
        for province_div in response.xpath("//div[contains(@data-action,'国内')]//div[@class='city_province']"):
            province_name = province_div.xpath("./div/text()").extract_first().strip()
            # Cities listed under this province
            for city_node in province_div.xpath("./ul/li"):
                city_name = city_node.xpath("./a/text()").extract_first()
                city_url = city_node.xpath("./a/@href").extract_first()
                # Links containing 'fang' point at the new-home site, not the commercial one
                if 'fang' in city_url:
                    logger.warning(f'{city_name} 没有商业办公链接')
                    continue
                follow = False
                if mode == 1:
                    # Include list
                    follow = ('全国' in cities) or (city_name in cities)
                elif mode == 2:
                    # Exclude list
                    if '全国' in cities:
                        break
                    follow = city_name not in cities
                if follow:
                    yield response.follow(
                        city_url,
                        headers=headers,
                        callback=self.parse_non_residential_url,
                        meta=dict(province_name=deepcopy(province_name), city_name=deepcopy(city_name), ),
                        dont_filter=True,
                    )

    def parse_non_residential_url(self, response):
        """
        Resolve a city's commercial (office/shop) site url and enqueue the four
        first list pages: office rent, office sale, shop rent, shop sale.
        :param response: city homepage
        :return: None (requests are enqueued directly on the scheduler)
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        # Cities whose homepage lacks a usable commercial link get a hard-coded url;
        # an entry here takes precedence over anything scraped from the page.
        fzz_url_overrides = {
            '佛山': 'https://shangye.ke.com/fs',
            '南京': 'https://shangye.ke.com/nj',
            '合肥': 'https://shangye.ke.com/hf',
            '长沙': 'https://shangye.ke.com/cs',
            '郑州': 'https://shangye.ke.com/zz',
            '沈阳': 'https://shangye.ke.com/sy',
            '石家庄': 'https://shangye.ke.com/sjz',
            '廊坊': 'https://shangye.ke.com/lf',
            '福州': 'https://shangye.ke.com/fz',
            '厦门': 'https://shangye.ke.com/xm',
            '温州': 'https://shangye.ke.com/wz',
            '金华': 'https://shangye.ke.com/jh',
            '南宁': 'https://shangye.ke.com/nn',
            '常州': 'https://shangye.ke.com/changzhou',
            '台州': 'https://shangye.ke.com/taizhou',
            '绍兴': 'https://shangye.ke.com/sx',
            '南通': 'https://shangye.ke.com/nt',
            '哈尔滨': 'https://shangye.ke.com/hrb',
            '长春': 'https://shangye.ke.com/cc',
            '太原': 'https://shangye.ke.com/ty',
            '保定': 'https://shangye.ke.com/baoding',
            '无锡': 'https://shangye.ke.com/wx',
            '昆明': 'https://shangye.ke.com/km',
            '南昌': 'https://shangye.ke.com/nc',
            '中山': 'https://shangye.ke.com/zs',
            '珠海': 'https://shangye.ke.com/zh',
            '惠州': 'https://shangye.ke.com/hui/',
            '嘉兴': 'https://shangye.ke.com/jx/',
            # BUG FIX: 泉州 was mapped to 嘉兴's url (/jx/) and 兰州 to 泉州's
            # url (/quanzhou/) — a copy-paste off-by-one in the original chain.
            '泉州': 'https://shangye.ke.com/quanzhou/',
            # NOTE(review): assumed pinyin slug for 兰州 — confirm against the site.
            '兰州': 'https://shangye.ke.com/lanzhou/',
            '贵阳': 'https://shangye.ke.com/gy/',
        }
        try:
            fzz_url = fzz_url_overrides.get(city_name) or response.xpath(
                "//a[contains(text(),'商业办公')]/@href|//a[contains(text(),'写字楼')]/@href").extract_first()
            assert fzz_url and 'ke.com' in fzz_url and 'fang' not in fzz_url, f'{city_name} 没有商业办公链接'
        except AssertionError as e:
            logger.warning(e)
        else:
            city_py = fzz_url.split("com/")[1]
            city_id = self.get_city_id(city_name)
            if not city_id:
                logger.error(f'{city_name} 行政区划id提取出错')
                return
            # First list page per category, no district filter yet.
            # del_type: 2 = rent, 1 = sale. Order kept: office rent/sale, shop rent/sale.
            for factory, list_callback in ((self.create_xzl_request, self.parse_xzl_li),
                                           (self.create_sp_request, self.parse_sp_li)):
                for del_type in (2, 1):
                    request = factory(city_name, city_id, city_py, page_num=1, del_type=del_type)
                    request.callback = list_callback
                    request._meta = dict(city_name=city_name, city_id=city_id, city_py=city_py,
                                         del_type=del_type, province_name=province_name,
                                         page_num=1, district_name=None)
                    # Enqueue directly on the scheduler so callbacks do not affect crawl order
                    self.crawler.engine.slot.scheduler.enqueue_request(request)

    def parse_xzl_li(self, response):
        """
        Office-building (rent/sale) list page handler.

        Yields a detail request per building, schedules the remaining pages
        from page 1, and when the total exceeds the site's 2000-item display
        cap, fetches the district list to re-query per district.
        :param response: JSON response of the office-building list api
        :return: yields scrapy.Request objects
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        city_id = copy(response.meta['city_id'])
        city_py = copy(response.meta['city_py'])
        page_num = copy(response.meta['page_num'])
        district_name = copy(response.meta['district_name'])
        del_type = copy(response.meta['del_type'])  # 2 = rent, 1 = sale
        try:
            resp_dict = json.loads(response.body.decode())
            data_dict = resp_dict.get('data', dict())
            case_li = data_dict.get('docs', list())
            # int(None) raises TypeError when 'total' is absent, routing to the error log below
            total_num = int(data_dict.get('total', None))
        except Exception as e:
            logger.error('{}-{}-{}-第{}页 写字楼列表提取出错，msg：{}'.format(province_name, city_name, district_name, page_num, e))
        else:
            if total_num == 0:
                logger.warning('{}-{}-{}-第{}页 写字楼总数为0'.format(province_name, city_name, district_name, page_num))
            # The site serves at most 100 pages (2000 rows); under that cap no
            # extra query condition (district) is needed
            elif 0 < total_num <= 2000:
                if case_li:
                    # Extract list-level fields for each building
                    for case_dict in case_li:
                        item_xzl = OfficeItem()
                        item_xzl['provinceName'] = province_name  # province
                        item_xzl['cityName'] = city_name  # city
                        item_xzl['projectId'] = case_dict.get('id', None)  # building/project id
                        item_xzl['name'], item_xzl['buildingName'] = self.get_project_and_building_name(
                            case_dict.get('name', None))  # project name & building name
                        item_xzl['districtName'] = case_dict.get('district', None)  # district
                        item_xzl['areaSegment'] = case_dict.get('areaDesc', None)  # project area segment
                        item_xzl['shangQuan'] = case_dict.get('bizcircle', None)  # business circle
                        # Rent
                        if del_type == 2:
                            item_xzl['category'] = '写字楼出租'  # category
                            item_xzl['projectAvgRental'] = case_dict.get('priceDesc', None)  # project average rent
                            xzl_detail_headers = {
                                'Referer': 'https://shangye.ke.com/{}/xzl/rent/slist/{}'.format(city_py,
                                                                                                item_xzl['projectId'])}
                        # Sale
                        else:
                            item_xzl['category'] = '写字楼出售'  # category
                            item_xzl['projectAvgPrice'] = case_dict.get('priceDesc', None)  # project average price
                            xzl_detail_headers = {
                                'Referer': 'https://shangye.ke.com/{}/xzl/buy/slist/{}'.format(city_py,
                                                                                               item_xzl['projectId'])}
                        # Fetch the building detail page
                        yield scrapy.Request(
                            self.xzl_detail_url.format(item_xzl['projectId'], del_type, city_id),
                            headers=xzl_detail_headers,
                            callback=self.parse_xzl_detail,
                            meta=dict(item_xzl=deepcopy(item_xzl), city_py=city_py, city_id=city_id,
                                      del_type=del_type),
                            dont_filter=False,
                        )
                else:
                    logger.error('{}-{}-{}-第{}页 写字楼列表为空'.format(province_name, city_name, district_name, page_num))

                # Pagination: ceil(total_num / 20) pages, scheduled once from page 1
                total_page = total_num // 20 + 1 if total_num % 20 else total_num // 20
                if page_num == 1:
                    for next_page in range(2, total_page + 1):
                        xzl_request = self.create_xzl_request(city_name, city_id, city_py, next_page, del_type,
                                                              district_name)
                        xzl_request.callback = self.parse_xzl_li
                        xzl_request._meta = dict(city_name=city_name, city_id=city_id, city_py=city_py,
                                                 province_name=province_name, page_num=next_page, del_type=del_type,
                                                 district_name=district_name)
                        # Request 6 times to cope with list pages returning shuffled data      --2021/7/19
                        for _ in range(6):
                            # Enqueue directly on the scheduler so callbacks do not affect crawl order      --2021/7/19
                            self.crawler.engine.slot.scheduler.enqueue_request(copy(xzl_request))
            # More than 2000 cases: fetch the city's districts and re-query per district
            else:
                # Office rent
                if del_type == 2:
                    # Only fetched once, from the first page
                    if page_num == 1:
                        if not district_name:
                            xzl_rent_headers = {'Referer': 'https://shangye.ke.com/{}/xzl/rent/mlist'.format(city_py)}
                            yield scrapy.Request(
                                self.district_li_url.format(city_id),
                                headers=xzl_rent_headers,
                                callback=self.parse_district_li,
                                dont_filter=True,  # prevent the district request from being deduped and data missed      --2021/7/16
                                meta=dict(city_name=city_name, city_id=city_id, city_py=city_py, category='xzl',
                                          province_name=province_name, page_num=page_num, del_type=del_type, ),
                            )
                        else:
                            # No district-scoped total above 2000 observed so far      --2021/6/15
                            logger.error(
                                '{}-{}-{} 写字楼出租（带行政区）总数超过2000，需分开获取'.format(province_name, city_name, district_name))
                # Office sale
                else:
                    # No city-wide (district-less) sale total above 2000 observed so far      --2021/6/18
                    logger.error('{}-{} 写字楼出售（不带行政区）案例数量超过最大获取数量2000'.format(province_name, city_name))

    def parse_sp_li(self, response):
        """
        Shop (rent/sale) list page handler.

        Yields a case-detail request per shop (optionally deduplicated by case
        id through redis), schedules the remaining pages from page 1, and when
        the total exceeds the site's 2000-item display cap, fetches the
        district list to re-query per district.
        :param response: JSON response of the shop list api
        :return: yields scrapy.Request objects
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        city_id = copy(response.meta['city_id'])
        city_py = copy(response.meta['city_py'])
        page_num = copy(response.meta['page_num'])
        district_name = copy(response.meta['district_name'])
        del_type = copy(response.meta['del_type'])  # 2 = rent, 1 = sale
        try:
            resp_dict = json.loads(response.body.decode())
            data_dict = resp_dict.get('data', dict())
            case_li = data_dict.get('docs', list())
            # int(None) raises TypeError when 'total' is absent, routing to the error log below
            total_num = int(data_dict.get('total', None))
        except Exception as e:
            logger.error('{}-{}-{}-第{}页 商铺列表提取出错，msg：{}'.format(province_name, city_name, district_name, page_num, e))
        else:
            if total_num == 0:
                logger.warning('{}-{}-{}-第{}页 商铺总数为0'.format(province_name, city_name, district_name, page_num))
            # The site serves at most 100 pages (2000 rows); under that cap no
            # extra query condition (district) is needed
            elif 0 < total_num <= 2000:
                if case_li:
                    # Extract list-level fields for each shop case
                    for case_dict in case_li:
                        item_sp = ShopsItem()
                        item_sp['provinceName'] = province_name  # province
                        item_sp['cityName'] = city_name  # city
                        item_sp['caseId'] = case_dict.get('houseCode', None)  # case id
                        item_sp['districtName'] = case_dict.get('districtName', None)  # district
                        item_sp['shangQuan'] = case_dict.get('bizcircleName', None)  # business circle
                        item_sp['operationType'] = case_dict.get('businessName', None)  # business/operation type
                        item_sp['decorationState'] = case_dict.get('fitmentName', None)  # decoration state
                        item_sp['title'] = case_dict.get('title', None)  # case title
                        # Rent case
                        if del_type == 2:
                            item_sp['category'] = '商铺出租'  # category
                            sp_case_headers = {
                                'Referer': 'https://shangye.ke.com/{}/sp/rent/detail/{}'.format(city_py,
                                                                                                item_sp['caseId'])
                            }
                        # Sale case
                        else:
                            item_sp['category'] = '商铺出售'  # category
                            sp_case_headers = {
                                'Referer': 'https://shangye.ke.com/{}/sp/buy/detail/{}'.format(city_py,
                                                                                               item_sp['caseId'])
                            }
                        # Deduplicate by case id (redis SADD returns 0 when the id was already seen)
                        if (self.settings['CASE_ID_FILTER'] and self.redis_conn.sadd('beike_case_id',
                                                                                     item_sp['caseId'])) or (
                                not self.settings['CASE_ID_FILTER']):
                            # Fetch the case detail
                            yield scrapy.Request(
                                self.sp_case_url.format(item_sp['caseId'], city_id),
                                headers=sp_case_headers,
                                callback=self.parse_sp_case_detail,
                                meta=dict(item_sp=deepcopy(item_sp), del_type=deepcopy(del_type), ),
                            )
                else:
                    logger.error('{}-{}-{}-第{}页  商铺列表为空'.format(province_name, city_name, district_name, page_num))

                # Pagination: ceil(total_num / 20) pages, scheduled once from page 1
                total_page = total_num // 20 + 1 if total_num % 20 else total_num // 20
                if page_num == 1:
                    for next_page in range(2, total_page + 1):
                        sp_request = self.create_sp_request(city_name, city_id, city_py, next_page, del_type,
                                                            district_name)
                        sp_request.callback = self.parse_sp_li
                        sp_request._meta = dict(city_name=city_name, city_id=city_id, city_py=city_py,
                                                province_name=province_name, page_num=next_page, del_type=del_type,
                                                district_name=district_name)
                        # Request 6 times to cope with list pages returning shuffled data      --2021/7/19
                        for _ in range(6):
                            # Enqueue directly on the scheduler so callbacks do not affect crawl order      --2021/7/19
                            self.crawler.engine.slot.scheduler.enqueue_request(copy(sp_request))
            # More than 2000 cases: fetch the city's districts and re-query per district
            else:
                # Shop rent
                if del_type == 2:
                    # Only fetched once, from the first page
                    if page_num == 1:
                        if not district_name:
                            sp_rent_headers = {'Referer': 'https://shangye.ke.com/{}/sp/rent/mlist'.format(city_py)}
                            yield scrapy.Request(
                                self.district_li_url.format(city_id),
                                headers=sp_rent_headers,
                                callback=self.parse_district_li,
                                dont_filter=True,  # prevent the district request from being deduped and data missed      --2021/7/16
                                meta=dict(city_name=city_name, city_id=city_id, city_py=city_py, category='sp',
                                          province_name=province_name, page_num=page_num, del_type=del_type, ),
                            )
                        else:
                            # No district-scoped total above 2000 observed so far      --2021/6/22
                            logger.error(
                                '{}-{}-{} 商铺出租（带行政区）数超过2000，需分开获取'.format(province_name, city_name, district_name))
                # Shop sale
                else:
                    if page_num == 1:
                        if not district_name:
                            sp_buy_headers = {'Referer': 'https://shangye.ke.com/{}/sp/buy/mlist'.format(city_py)}
                            yield scrapy.Request(
                                self.district_li_url.format(city_id),
                                headers=sp_buy_headers,
                                callback=self.parse_district_li,
                                dont_filter=True,  # prevent the district request from being deduped and data missed      --2021/7/16
                                meta=dict(city_name=city_name, city_id=city_id, city_py=city_py, category='sp',
                                          province_name=province_name, page_num=page_num, del_type=del_type, ),
                            )
                        else:
                            # No district-scoped total above 2000 observed so far      --2021/6/25
                            logger.error(
                                '{}-{}-{} 商铺出售（带行政区）数超过2000，需分开获取'.format(province_name, city_name, district_name))
    def parse_xzl_detail(self, response):
        """
        Office-building (rent/sale) detail handler.

        Reads building-level fields, then yields a case-detail request for each
        case inside the building (optionally deduplicated by case id via redis).
        :param response: JSON response of the building detail api
        :return: yields scrapy.Request objects
        """
        item_xzl = copy(response.meta['item_xzl'])
        city_py = copy(response.meta['city_py'])
        city_id = copy(response.meta['city_id'])
        del_type = copy(response.meta['del_type'])  # 2 = rent, 1 = sale
        try:
            resp_dict = json.loads(response.body.decode())
            data_dict = resp_dict.get('data', dict())
            location = data_dict.get('location', dict())
            case_li = data_dict.get('docs', list())
            assert case_li, '{}-{}-{}-{} 写字楼案例列表为空'.format(item_xzl['cityName'], item_xzl['districtName'],
                                                           item_xzl['name'], item_xzl['projectId'])
        except AssertionError as e:
            logger.warning(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{} 写字楼详情信息提取出错，msg：{}'.format(item_xzl['cityName'], item_xzl['districtName'],
                                                        item_xzl['name'], item_xzl['projectId'], e))
        else:
            item_xzl['lng'] = location.get('lng', None)  # longitude
            item_xzl['lat'] = location.get('lat', None)  # latitude
            item_xzl['developerName'] = data_dict.get('developer', None)  # developer
            item_xzl['grossBuildArea'] = data_dict.get('buildArea', None)  # gross building area
            item_xzl['totalFloor'] = data_dict.get('totalFloor', None)  # total floors
            # Walk the case list and emit one detail request per case
            for case_dict in case_li:
                item_xzl['title'] = case_dict.get('title', None)  # case title
                item_xzl['buildArea'] = case_dict.get('area', None)  # case building area
                item_xzl['decorationState'] = case_dict.get('fitmentName', None)  # decoration state
                item_xzl['caseId'] = case_dict.get('houseCode', None)  # case id
                # Rent case
                if del_type == 2:
                    item_xzl['monthlyRental'] = case_dict.get('totalPrice', None)  # monthly rent
                    item_xzl['dailyRental'] = case_dict.get('singlePrice', None)  # daily rent
                    xzl_case_headers = {
                        'Referer': 'https://shangye.ke.com/{}/xzl/rent/detail/{}'.format(city_py, item_xzl['caseId'])
                    }
                # Sale case
                else:
                    item_xzl['totalPrice'] = case_dict.get('totalPrice', None)  # total price
                    item_xzl['price'] = case_dict.get('singlePrice', None)  # unit price
                    xzl_case_headers = {
                        'Referer': 'https://shangye.ke.com/{}/xzl/buy/detail/{}'.format(city_py, item_xzl['caseId'])
                    }
                # Deduplicate by case id (redis SADD returns 0 when the id was already seen)
                if (self.settings['CASE_ID_FILTER'] and self.redis_conn.sadd('beike_case_id', item_xzl['caseId'])) or (
                        not self.settings['CASE_ID_FILTER']):
                    # Fetch the case detail
                    yield scrapy.Request(
                        self.xzl_case_url.format(item_xzl['caseId'], city_id),
                        headers=xzl_case_headers,
                        callback=self.parse_xzl_case_detail,
                        meta=dict(item_xzl=deepcopy(item_xzl), ),
                    )

    def parse_xzl_case_detail(self, response):
        """
        Office case (rent/sale) detail handler: fills the remaining item fields
        and yields the finished item.
        :param response: JSON response of the office case detail api
        :return: yields the completed OfficeItem
        """
        item_xzl = copy(response.meta['item_xzl'])
        try:
            resp_dict = json.loads(response.body.decode())
            case_dict = resp_dict['data']['docs']
            # Validate before dereferencing: previously .get('ctime') ran first,
            # so a non-dict payload raised AttributeError and was logged as a
            # generic extraction error, making this assert unreachable.
            assert case_dict and isinstance(case_dict, dict), '{}-{}-{}-{}-{} 案例详情信息获取为空'.format(
                item_xzl['cityName'], item_xzl['districtName'], item_xzl['name'], item_xzl['title'], item_xzl['caseId']
            )
            case_time = case_dict.get('ctime', dict())
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error('{}-{}-{}-{}-{} 案例详情信息提取出错，msg:{}'.format(
                item_xzl['cityName'], item_xzl['districtName'], item_xzl['name'], item_xzl['title'], item_xzl['caseId'],
                e)
            )
        else:
            # Case detail fields
            item_xzl['houseAdvantages'] = case_dict.get('feature', None)  # selling points
            item_xzl['projectBrief'] = case_dict.get('content', None)  # description
            item_xzl['floorLevels'] = case_dict.get('floorth', None)  # floor
            item_xzl['floorType'] = case_dict.get('floorPositionName', None)  # floor type
            item_xzl['workingSeat'] = case_dict.get('workPlaceCount', None)  # number of work seats
            item_xzl['listingDate'] = case_time.get('date', None)  # listing date
            item_xzl['highQualityHouse'] = case_dict.get('isReal', None)  # quality/verified flag
            yield item_xzl

    def parse_sp_case_detail(self, response):
        """
        Shop case (rent/sale) detail handler: fills the remaining item fields
        and yields the finished item.
        :param response: JSON response of the shop case detail api
        :return: yields the completed ShopsItem
        """
        item_sp = copy(response.meta['item_sp'])
        del_type = copy(response.meta['del_type'])  # 2 = rent, 1 = sale
        try:
            resp_dict = json.loads(response.body.decode())
            case_dict = resp_dict['data']['docs']
            # Validate before dereferencing. Use .get('name'): 'name' is only
            # filled further below, so item_sp['name'] raised KeyError while
            # building this message and masked the real failure.
            assert case_dict and isinstance(case_dict, dict), '{}-{}-{}-{}-{} 案例详情信息获取为空'.format(
                item_sp['cityName'], item_sp['districtName'], item_sp.get('name'), item_sp['title'], item_sp['caseId']
            )
            case_time = case_dict.get('ctime', dict())
            location = case_dict.get('location', dict())
            content_format = case_dict.get('contentFormat', dict())
            # Guard against non-dict contentFormat payloads; the original
            # 'content_format or isinstance(...)' let truthy non-dicts through
            # to the .get() calls below.
            content_dict = content_format if isinstance(content_format, dict) else dict()
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error('{}-{}-{}-{}-{} 案例详情信息提取出错，msg:{}'.format(
                item_sp['cityName'], item_sp['districtName'], item_sp.get('name'), item_sp['title'], item_sp['caseId'],
                e)
            )
        else:
            # Case detail fields
            item_sp['listingDate'] = case_time.get('date', None)  # listing date
            item_sp['buildArea'] = case_dict.get('area', None)  # building area
            item_sp['highQualityHouse'] = case_dict.get('isReal', None)  # quality/verified flag
            item_sp['name'] = case_dict.get('resblockName', None)  # project name
            item_sp['buildingName'] = case_dict.get('buildingNum', None)  # building name
            item_sp['lng'] = location.get('lng', None)  # longitude
            item_sp['lat'] = location.get('lat', None)  # latitude
            item_sp['communityMark'] = self.deal_content(content_dict.get('lpts', None))  # project features
            item_sp['propertyAdvantages'] = self.deal_content(content_dict.get('wyts', None))  # property features
            item_sp['transportFacility'] = self.deal_content(content_dict.get('jtcx', None))  # transport access
            item_sp['support'] = self.deal_content(content_dict.get('zbpt', None))  # surrounding facilities
            item_sp['other'] = self.deal_content(content_dict.get('other', None))  # other
            item_sp['isStreet'] = case_dict.get('isStreet', None)  # street-facing flag
            item_sp['form'] = case_dict.get('houseTypeName', None)  # form/type
            item_sp['houseAdvantages'] = case_dict.get('equipName', None)  # features/equipment
            item_sp['floorType'] = case_dict.get('floorTypeName', None)  # floor type
            # Rent
            if del_type == 2:
                item_sp['monthlyRental'] = case_dict.get('totalPrice', None)  # monthly rent
                item_sp['dailyRental'] = case_dict.get('singlePrice', None)  # daily rent
            # Sale
            else:
                item_sp['totalPrice'] = case_dict.get('totalPrice', None)  # total price
                item_sp['price'] = case_dict.get('singlePrice', None)  # unit price
            yield item_sp

    def parse_district_li(self, response):
        """
        District list handler: for each district of the city, re-issue the
        first list page (office or shop, per meta['category']) scoped to that
        district.
        :param response: JSON response of the district list api
        :return: yields scrapy.Request objects
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        city_id = copy(response.meta['city_id'])
        city_py = copy(response.meta['city_py'])
        page_num = copy(response.meta['page_num'])
        del_type = copy(response.meta['del_type'])  # 2 = rent, 1 = sale
        category = copy(response.meta['category'])  # 'xzl' (office) or 'sp' (shop)
        try:
            resp_dict = json.loads(response.body.decode())
            data_li = resp_dict.get('data', list())
            # Plain comprehension instead of building a pandas DataFrame just to
            # read one column; an empty payload now hits the "为空" assert below
            # instead of a KeyError logged as a generic error, and a missing
            # 'name' key still lands in the error log.
            district_li = [district['name'] for district in data_li]
            assert district_li, '{}-{} 行政区列表获取为空'.format(province_name, city_name)
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error('{}-{} 行政区列表获取出错，msg：{}'.format(province_name, city_name, e))
        else:
            for district_name in district_li:
                # First office list page scoped to this district
                if category == 'xzl':
                    xzl_request = self.create_xzl_request(city_name, city_id, city_py, page_num, del_type,
                                                          district_name)
                    xzl_request.callback = self.parse_xzl_li
                    xzl_request._meta = dict(city_name=city_name, city_id=city_id, city_py=city_py,
                                             province_name=province_name, page_num=page_num, del_type=del_type,
                                             district_name=district_name)
                    yield xzl_request
                # First shop (rent/sale) list page scoped to this district
                elif category == 'sp':
                    sp_request = self.create_sp_request(city_name, city_id, city_py, page_num, del_type, district_name)
                    sp_request.callback = self.parse_sp_li
                    sp_request._meta = dict(city_name=city_name, city_id=city_id, city_py=city_py,
                                            province_name=province_name, page_num=page_num, del_type=del_type,
                                            district_name=district_name)
                    yield sp_request

    def create_xzl_request(self, city_name, city_id, city_py, page_num, del_type, district_name=None):
        """
        Build the request object for an office-building (rent/sale) list page.
        :param city_name: city name (Chinese)
        :param city_id: administrative-division id of the city
        :param city_py: city pinyin (abbreviation/full spelling)
        :param page_num: page number
        :param del_type: deal type: 2 (rent), 1 (sale)
        :param district_name: optional district name; adds a district filter when given
        :return: scrapy.Request for the office list API
        """
        deal = 'rent' if del_type == 2 else 'buy'
        xzl_headers = {'Referer': 'https://shangye.ke.com/{}/xzl/{}/mlist'.format(city_py, deal)}
        # optional district filter fragment, inserted right after diType
        district_part = '&district={}'.format(urllib.parse.quote(district_name)) if district_name else ''
        params_str = ('?cityId={}&city={}&page={}&delType={}&diType={}{}'
                      '&singlePrice=&area=&buildingSelected=&houseSelected=&fitment=&sorter=').format(
            city_id, urllib.parse.quote(city_name), page_num, del_type,
            urllib.parse.quote('区域'), district_part)
        return scrapy.Request(
            self.xzl_base_url + params_str,
            headers=xzl_headers,
            dont_filter=True,
            priority=50,  # list pages get a higher priority so they are crawled first   --2021/7/16
        )

    def create_sp_request(self, city_name, city_id, city_py, page_num, del_type, district_name=None):
        """
        Build the request object for a shop (rent/sale) list page.
        :param city_name: city name (Chinese)
        :param city_id: administrative-division id of the city
        :param city_py: city pinyin (abbreviation/full spelling)
        :param page_num: page number
        :param del_type: deal type: 2 (rent), 1 (sale)
        :param district_name: optional district name; adds a district filter when given
        :return: scrapy.Request for the shop list API
        """
        # optional district filter fragment, inserted right after diType
        district_part = '&district={}'.format(urllib.parse.quote(district_name)) if district_name else ''
        common_args = (city_id, urllib.parse.quote(city_name), page_num, del_type,
                       urllib.parse.quote('区域'), district_part)
        # shop rental
        if del_type == 2:
            sp_headers = {
                'Referer': 'https://shangye.ke.com/{}/sp/rent/mlist'.format(city_py)
            }
            params_str = ('?cityId={}&city={}&page={}&delType={}&diType={}{}'
                          '&totalPrice=&area=&houseType=&business=&equip=&floor=&decoration='
                          '&spStatus=&isReal=&sorter=').format(*common_args)
        # shop sale
        else:
            sp_headers = {
                'Referer': 'https://shangye.ke.com/{}/sp/buy/mlist'.format(city_py)
            }
            params_str = ('?cityId={}&city={}&page={}&delType={}&diType={}{}'
                          '&totalPrice=&spAreaSelect=&houseType=&business=&equip=&floor=&decoration='
                          '&spStatus=&isNew=&houseAge=&isReal=&sorter=').format(*common_args)
        return scrapy.Request(
            self.sp_base_url + params_str,
            headers=sp_headers,
            dont_filter=True,
            priority=50,  # list pages get a higher priority so they are crawled first   --2021/7/16
        )

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        Resolve the configured city list and its mode.
        Spider arguments (-a crawl= / -a filter=) take precedence over settings;
        a crawl list takes precedence over a filter list at each level.
        :return: (['重庆', '成都', '武汉'], 1) — mode 1 = crawl list, mode 2 = filter list
        """
        crawl_arg = getattr(self, 'CRAWL_CITIES', None)
        filter_arg = getattr(self, 'FILTER_CITIES', None)
        if crawl_arg:
            return self.check_params(crawl_arg), 1  # mode 1: list of cities to crawl
        if filter_arg:
            return self.check_params(filter_arg), 2  # mode 2: list of cities to exclude
        crawl_setting = self.settings.get('CRAWL_CITIES', None)
        filter_setting = self.settings.get('FILTER_CITIES', None)
        if crawl_setting:
            return self.check_params(crawl_setting), 1
        if filter_setting:
            return self.check_params(filter_setting), 2
        # nothing configured: crawl the whole country
        return ['全国'], 1

    def check_params(self, params) -> list:
        """
        Validate and normalize the crawl-cities parameter.
        Accepts either a delimited string ('重庆，北京') or a list of city names
        and returns a clean list of Chinese city names. On invalid input the
        error is logged and the spider is closed.
        :param params: str or list of city names
        :return: e.g. ['重庆', '成都', '武汉'] (None after a validation failure)
        """
        sep_regex = re.compile('，|、|；|;')  # normalize every supported separator to ','
        cn_regex = re.compile(r'^[\u4E00-\u9FFF]+$')  # a token must be entirely Chinese characters

        def _clean(raw_li):
            # Strip entries and keep only non-empty, purely-Chinese tokens;
            # every raw entry must survive cleaning, otherwise input was malformed.
            cleaned = [p.strip() for p in raw_li
                       if isinstance(p, str) and p.strip() and cn_regex.findall(p.strip())]
            if not cleaned or len(cleaned) != len(raw_li):
                raise ValueError('invalid city list: {!r}'.format(raw_li))
            return cleaned

        try:
            if isinstance(params, str):
                try:
                    return _clean(sep_regex.sub(',', params).split(','))
                except ValueError:
                    # fixed: example previously referenced the wrong spider ('fangduoduo')
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl beike -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
            elif isinstance(params, list):
                try:
                    return _clean(params)
                except ValueError:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
            else:
                raise TypeError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    def get_city_id(self, city_name):
        """
        Resolve the administrative-division id for a city name.
        :param city_name: Chinese city name, e.g. '济南'
        :return: city id, or None when the name cannot be resolved to exactly one match
        """
        # Beike uses ids that differ from the official division codes for these
        # cities, so they are hard-coded here.       --2021/6/17
        overrides = {
            '济南': 370101,
            '西安': 610100,
            '大连': 210200,
            '长沙': 430100,
            '南昌': 360100,
            '中山': 442000,
        }
        if city_name in overrides:
            return overrides[city_name]
        try:
            # substring match against the division table loaded elsewhere on the spider
            city_df = self.pcd_df.loc[self.pcd_df.name.str.contains(city_name)]
            city_li = city_df.to_dict(orient='records')
            # require exactly one match so an ambiguous name never yields a wrong id
            assert len(city_li) == 1
            return city_li[0]['id']
        except Exception:
            # best-effort lookup: no match / ambiguous match / missing table -> None
            # (removed a leftover debug print(city_df) from the lookup path)
            return None

    @staticmethod
    def get_project_and_building_name(_str) -> tuple:
        """
        提取楼盘和楼栋名
        :param _str:
        :return:
        """
        regex = re.compile(r'\s+')
        try:
            ret_li = [i for i in regex.sub(',', _str).split(',') if i.strip()]
            assert len(ret_li) == 2
        except:
            return _str, None
        else:
            return ret_li[0], ret_li[1]

    @staticmethod
    def deal_content(_cont):
        """
        提取content信息
        :param _cont:
        :return:
        """
        try:
            assert _cont and isinstance(_cont, dict)
            ret = _cont.get('desc', None)
        except:
            return
        else:
            return ret
