# -*- coding: utf-8 -*-
import scrapy

import logging
from copy import copy, deepcopy
import re
import json
from HifoFzz.items import OfficeItem, ShopsItem
import datetime

logger = logging.getLogger(__name__)


class BeikeV2Spider(scrapy.Spider):
    """Crawl commercial office/shop rent & sale listings from shangye.ke.com.

    Starting from the national city index page, the spider resolves each
    city's commercial-property site, enumerates its districts and business
    circles, then pages through four listing APIs (office rent/sale, shop
    rent/sale), yielding OfficeItem / ShopsItem records.
    """
    name = 'beike_v2'
    allowed_domains = ['ke.com']
    start_urls = ['https://www.ke.com/city/']
    district_li_url = 'https://shangye.ke.com/api/ke/filters/location/xzl_star'  # GET — district & business-circle option list
    xzl_rent_url = 'https://shangye.ke.com/api/c/house/list'  # GET — office-rent listing API
    xzl_sell_url = 'https://shangye.ke.com/api/c/house/sell/list'  # GET — office-sale listing API
    sp_rent_url = 'https://shangye.ke.com/api/c/house/sp_rent_list'  # GET — shop-rent listing API
    sp_sell_url = 'https://shangye.ke.com/api/c/shangpu/sell/list'  # GET — shop-sale listing API

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """Store the optional ``-a crawl=`` / ``-a filter=`` city lists.

        :param crawl: cities to crawl (e.g. '重庆，北京'); validated later by
            check_params
        :param filter: cities to exclude while crawling everything else;
            validated later by check_params
        """
        super(BeikeV2Spider, self).__init__(*args, **kwargs)
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter

    def start_requests(self):
        """Resolve the configured city list and open the national city index.

        The resolved list and its mode (1 = crawl list, 2 = filter list)
        travel to :meth:`parse` via the request meta.
        """
        cities_li, list_mode = self.get_crawl_or_filter_cities()
        request = scrapy.Request(
            self.start_urls[0],
            meta={'cities_li': cities_li, 'type': list_mode},
            dont_filter=True,
        )
        yield request

    def parse(self, response, **kwargs):
        """Pre-process the national city index page.

        Walks every province group and follows each city's homepage when it
        matches the configured crawl list (mode 1) or is absent from the
        configured filter list (mode 2). Cities whose link contains 'fang'
        point at new-home sites and are skipped.
        """
        cities_li = response.meta['cities_li']
        list_mode = response.meta['type']
        if not cities_li:
            logger.error('贝壳非住宅城市url列表匹配为空')
            self.crawler.engine.close_spider(self, '贝壳非住宅城市url列表匹配为空')
            return
        headers = {'Referer': 'https://www.ke.com/'}
        # Province (or municipality) groups on the domestic tab.
        province_div_li = response.xpath(
            "//div[contains(@data-action,'国内')]//div[@class='city_province']")
        for province_div in province_div_li:
            province_name = province_div.xpath("./div/text()").extract_first().strip()
            for city_data in province_div.xpath("./ul/li"):
                city_name = city_data.xpath("./a/text()").extract_first()
                city_url = city_data.xpath("./a/@href").extract_first()
                if 'fang' in city_url:
                    # 'fang' links are new-home sites, not commercial ones.
                    logger.warning(f'{city_name} 没有商业办公链接')
                    continue
                if list_mode == 1:
                    wanted = ('全国' in cities_li) or (city_name in cities_li)
                elif list_mode == 2:
                    if '全国' in cities_li:
                        # Filtering out the whole country: nothing else in
                        # this province can match, stop scanning it.
                        break
                    wanted = city_name not in cities_li
                else:
                    wanted = False
                if wanted:
                    yield response.follow(
                        city_url,
                        headers=headers,
                        callback=self.parse_non_residential_url,
                        meta={'province_name': province_name, 'city_name': city_name},
                        dont_filter=True,
                    )

    def parse_non_residential_url(self, response):
        """Resolve the city's commercial-property (商业办公) site URL.

        A hard-coded shangye.ke.com landing page is preferred for cities
        whose homepage lacks a usable link; otherwise the anchor on the city
        homepage is used. When a valid URL and a city id are found, one
        request for the district & business-circle option list is yielded.

        :param response: the city homepage
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        # Cities whose homepage has no usable "商业办公"/"写字楼" anchor; use
        # known shangye.ke.com landing pages instead.
        overrides = {
            '佛山': 'https://shangye.ke.com/fs',
            '南京': 'https://shangye.ke.com/nj',
            '合肥': 'https://shangye.ke.com/hf',
            '长沙': 'https://shangye.ke.com/cs',
            '郑州': 'https://shangye.ke.com/zz',
            '沈阳': 'https://shangye.ke.com/sy',
            '石家庄': 'https://shangye.ke.com/sjz',
            '廊坊': 'https://shangye.ke.com/lf',
            '福州': 'https://shangye.ke.com/fz',
            '厦门': 'https://shangye.ke.com/xm',
            '温州': 'https://shangye.ke.com/wz',
            '金华': 'https://shangye.ke.com/jh',
            '南宁': 'https://shangye.ke.com/nn',
            '常州': 'https://shangye.ke.com/changzhou',
            '台州': 'https://shangye.ke.com/taizhou',
            '绍兴': 'https://shangye.ke.com/sx',
            '南通': 'https://shangye.ke.com/nt',
            '哈尔滨': 'https://shangye.ke.com/hrb',
            '长春': 'https://shangye.ke.com/cc',
            '太原': 'https://shangye.ke.com/ty',
            '保定': 'https://shangye.ke.com/baoding',
            '无锡': 'https://shangye.ke.com/wx',
            '昆明': 'https://shangye.ke.com/km',
            '南昌': 'https://shangye.ke.com/nc',
            '中山': 'https://shangye.ke.com/zs',
            '珠海': 'https://shangye.ke.com/zh',
            '惠州': 'https://shangye.ke.com/hui/',
            '嘉兴': 'https://shangye.ke.com/jx/',
            # BUGFIX: 泉州 previously pointed at /jx/ (嘉兴) and 兰州 at
            # /quanzhou/ (泉州) — an off-by-one copy-paste shift.
            '泉州': 'https://shangye.ke.com/quanzhou/',
            # NOTE(review): 兰州 slug inferred from the shifted pattern above —
            # TODO confirm against the live site.
            '兰州': 'https://shangye.ke.com/lanzhou/',
            '贵阳': 'https://shangye.ke.com/gy/',
        }
        fzz_url = overrides.get(city_name)
        if fzz_url is None:
            fzz_url = response.xpath(
                "//a[contains(text(),'商业办公')]/@href|//a[contains(text(),'写字楼')]/@href").extract_first()
        if not (fzz_url and 'ke.com' in fzz_url and 'fang' not in fzz_url):
            logger.warning(f'{city_name} 没有商业办公链接')
            return
        # City pinyin segment, e.g. 'https://shangye.ke.com/fs' -> 'fs'.
        # strip('/') avoids double slashes in the Referer for override URLs
        # that end with '/'.
        city_py = fzz_url.split("com/")[1].strip('/')
        city_id = self.get_city_id(city_name)
        if not city_id:
            logger.error(f'{city_name} 行政区划id提取出错')
            return
        # Fetch the district & business-circle option list for this city.
        headers = {'Referer': 'https://shangye.ke.com/{}/xzl_rent.html'.format(city_py)}
        param = '?platform=2&device=1&business_type=2&page_type=2&city_code={}&cityCode={}&menu_type=house%3Amenu%3Ahouse%3Arent'
        yield scrapy.Request(
            self.district_li_url + param.format(city_id, city_id),
            headers=headers,
            callback=self.parse_district_li,
            meta=dict(city_name=city_name, city_id=city_id, city_py=city_py, province_name=province_name, ),
            dont_filter=True,
        )

    def parse_district_li(self, response):
        """Parse the district & business-circle option list for one city.

        For every district — and every business circle below it when any
        exist — yields the first listing page (page 0) of all four markets:
        office rent (business_type 2), office sale (1), shop rent (4) and
        shop sale (3).

        :param response: JSON response from district_li_url
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        city_id = response.meta['city_id']
        city_py = response.meta['city_py']
        try:
            resp_dict = json.loads(response.body.decode())
            assert resp_dict['code'] == 100000, '{}-{} 行政区&商圈 列表响应code出错'.format(province_name, city_name)
            district_dict_li = resp_dict['data']['options'][1]['options']
            assert district_dict_li, '{}-{} 行政区&商圈 列表获取为空'.format(province_name, city_name)
        except AssertionError as e:
            logger.error(e)
            return
        except Exception as e:
            logger.error('{}-{} 行政区&商圈 列表获取出错，msg：{}'.format(province_name, city_name, e))
            return
        # (listing url, referer page, business_type, callback, priority) for
        # each of the four markets — offices first, at higher priority.
        targets = (
            (self.xzl_rent_url, 'xzl_rent.html', 2, self.parse_xzl_li, 40),
            (self.xzl_sell_url, 'xzl_buy.html', 1, self.parse_xzl_li, 40),
            (self.sp_rent_url, 'sp_rent.html', 4, self.parse_sp_li, 20),
            (self.sp_sell_url, 'sp_buy.html', 3, self.parse_sp_li, 20),
        )
        # Query templates with / without a business-circle filter.
        param_str_1 = '?platform=1&device=1&page={}&city_id={}&size=20&big_bizcircle_id=&district_id={}&bizcircle_id%5B0%5D={}&subway_id=&resblock_id=&query_content=&sorter=11&bigbizcircle_switch=1&business_type={}'
        param_str_2 = '?platform=1&device=1&page={}&city_id={}&size=20&big_bizcircle_id=&district_id={}&subway_id=&resblock_id=&query_content=&sorter=11&bigbizcircle_switch=1&business_type={}'
        # The first entry of every option list is the "unlimited" placeholder,
        # hence the [1:] slices below.
        for district_dict in district_dict_li[1:]:
            district_name = district_dict['name']
            district_id = district_dict['id']
            shangquan_dict_li = district_dict['options']
            # BUGFIX: a single entry means only the placeholder exists; the
            # old `if len(...)` then iterated an empty [1:] slice and silently
            # dropped the whole district. Treat it as "no business circles".
            if len(shangquan_dict_li) > 1:
                for shangquan_dict in shangquan_dict_li[1:]:
                    shangquan_name = shangquan_dict['name']
                    shangquan_id = shangquan_dict['id']
                    for list_url, referer_page, business_type, callback, priority in targets:
                        yield scrapy.Request(
                            list_url + param_str_1.format(0, city_id, district_id, shangquan_id, business_type),
                            headers={'Referer': 'https://shangye.ke.com/{}/{}'.format(city_py, referer_page)},
                            callback=callback,
                            meta=dict(city_name=city_name, city_id=city_id, district_id=district_id,
                                      shangquan_id=shangquan_id, city_py=city_py, business_type=business_type,
                                      province_name=province_name, page_num=0, district_name=district_name,
                                      shangquan_name=shangquan_name, ),
                            dont_filter=True,
                            priority=priority,
                        )
            else:
                for list_url, referer_page, business_type, callback, priority in targets:
                    yield scrapy.Request(
                        list_url + param_str_2.format(0, city_id, district_id, business_type),
                        headers={'Referer': 'https://shangye.ke.com/{}/{}'.format(city_py, referer_page)},
                        callback=callback,
                        meta=dict(city_name=city_name, city_id=city_id, district_id=district_id,
                                  shangquan_id=None, city_py=city_py, business_type=business_type,
                                  province_name=province_name, page_num=0, district_name=district_name,
                                  shangquan_name=None, ),
                        dont_filter=True,
                        priority=priority,
                    )
    def parse_xzl_li(self, response):
        """Parse one page of the office (rent/sale) listing API.

        Yields an OfficeItem per listing; when handling the first page
        (page_num == 0) it also schedules requests for every remaining page,
        so the fan-out happens exactly once per query.

        :param response: JSON response from xzl_rent_url / xzl_sell_url
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        city_id = response.meta['city_id']
        city_py = response.meta['city_py']
        page_num = response.meta['page_num']
        district_id = response.meta['district_id']
        district_name = response.meta['district_name']
        shangquan_id = response.meta['shangquan_id']
        shangquan_name = response.meta['shangquan_name']
        business_type = response.meta['business_type']
        # business_type 2 = office rent, 1 = office sale
        category = '写字楼出租' if business_type == 2 else '写字楼出售'
        try:
            resp_dict = json.loads(response.body.decode())
            assert resp_dict['code'] == 100000, \
                '{}-{}-{}-{}-第{}页 写字楼列表响应code出错'.format(city_name, district_name, shangquan_name, category,
                                                        page_num + 1)
            data_dict = resp_dict.get('data', dict())
            case_li = data_dict.get('docs', list())
            # BUGFIX: default to 0 when 'total' is absent — int(None) used to
            # raise here and discard every listing already on the page.
            total_num = int(data_dict.get('total') or 0)
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{}-第{}页 写字楼列表提取出错，msg：{}'.format(city_name, district_name, shangquan_name, category,
                                                           page_num + 1, e))
        else:
            if case_li:
                # Office listing extraction
                for case_dict in case_li:
                    item_xzl = OfficeItem()
                    item_xzl['provinceName'] = province_name  # province
                    item_xzl['cityName'] = city_name  # city
                    item_xzl['projectId'] = case_dict.get('resblock_id', None)  # estate id
                    item_xzl['name'] = case_dict.get('resblock_name', None)  # estate name
                    item_xzl['buildingName'] = case_dict.get('building_name', None)  # building name
                    item_xzl['districtName'] = case_dict.get('district_name', None)  # district
                    item_xzl['shangQuan'] = case_dict.get('bizcircle_name', None)  # business circle
                    item_xzl['category'] = category  # listing category
                    location = case_dict.get('geo', None)
                    item_xzl['lng'], item_xzl['lat'] = self.get_lng_and_lat(location)  # longitude, latitude
                    item_xzl['title'] = case_dict.get('title', None)  # listing title
                    item_xzl['buildArea'] = case_dict.get('area', None)  # floor area
                    item_xzl['decorationState'] = case_dict.get('fitment_name', None)  # decoration state
                    item_xzl['caseId'] = case_dict.get('house_id', None)  # listing id
                    item_xzl['houseAdvantages'] = case_dict.get('feature', None)  # selling points
                    item_xzl['projectBrief'] = case_dict.get('tags', None)  # listing description
                    item_xzl['floorLevels'] = None  # exact floor: not exposed by this API
                    type_num = case_dict.get('floor_position_type', None)
                    item_xzl['floorType'] = self.get_floor_type(type_num)  # floor band (high/mid/low)
                    item_xzl['workingSeat'] = case_dict.get('max_workstation_count', None)  # workstation count
                    stamp = case_dict.get('ctime', None)
                    item_xzl['listingDate'] = self.transfer_date(stamp)  # listing date
                    item_xzl['address'] = case_dict.get('street_name', None)  # address
                    item_xzl['isRegister'] = case_dict.get('can_register', None)  # company registration allowed
                    if business_type == 2:
                        item_xzl['monthlyRental'] = case_dict.get('rent_price', None)  # monthly rent
                        item_xzl['dailyRental'] = case_dict.get('unit_day_rent_price', None)  # daily rent
                    else:
                        item_xzl['totalPrice'] = case_dict.get('sell_price', None)  # total price
                        item_xzl['price'] = case_dict.get('sell_unit_price', None)  # unit price
                    yield item_xzl
            else:
                logger.warning(
                    '{}-{}-{}-{}-第{}页 写字楼列表为空'.format(city_name, district_name, shangquan_name, category, page_num + 1))

            # Pagination: ceil(total_num / 20) pages of 20 listings each.
            total_page = -(-total_num // 20)
            if page_num == 0:
                # Office rent vs. office sale endpoint/referer.
                if business_type == 2:
                    xzl_url = self.xzl_rent_url
                    xzl_headers = {'Referer': 'https://shangye.ke.com/{}/xzl_rent.html'.format(city_py)}
                else:
                    xzl_url = self.xzl_sell_url
                    xzl_headers = {'Referer': 'https://shangye.ke.com/{}/xzl_buy.html'.format(city_py)}
                param_str_1 = '?platform=1&device=1&page={}&city_id={}&size=20&big_bizcircle_id=&district_id={}&bizcircle_id%5B0%5D={}&subway_id=&resblock_id=&query_content=&sorter=11&bigbizcircle_switch=1&business_type={}'
                param_str_2 = '?platform=1&device=1&page={}&city_id={}&size=20&big_bizcircle_id=&district_id={}&subway_id=&resblock_id=&query_content=&sorter=11&bigbizcircle_switch=1&business_type={}'
                for next_page_num in range(1, total_page):
                    xzl_param = param_str_1.format(next_page_num, city_id, district_id, shangquan_id, business_type) \
                        if shangquan_id else param_str_2.format(next_page_num, city_id, district_id, business_type)
                    yield scrapy.Request(
                        xzl_url + xzl_param,
                        headers=xzl_headers,
                        callback=self.parse_xzl_li,
                        meta=dict(city_name=city_name, city_id=city_id, district_id=district_id,
                                  shangquan_id=shangquan_id, city_py=city_py, business_type=business_type,
                                  province_name=province_name, page_num=next_page_num, district_name=district_name,
                                  shangquan_name=shangquan_name, ),
                        dont_filter=True,
                        priority=40,
                    )
    def parse_sp_li(self, response):
        """Parse one page of the shop (rent/sale) listing API.

        Yields a ShopsItem per listing; when handling the first page
        (page_num == 0) it also schedules requests for every remaining page,
        so the fan-out happens exactly once per query.

        :param response: JSON response from sp_rent_url / sp_sell_url
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        city_id = response.meta['city_id']
        city_py = response.meta['city_py']
        page_num = response.meta['page_num']
        district_id = response.meta['district_id']
        district_name = response.meta['district_name']
        shangquan_id = response.meta['shangquan_id']
        shangquan_name = response.meta['shangquan_name']
        business_type = response.meta['business_type']
        # business_type 4 = shop rent, 3 = shop sale
        category = '商铺出租' if business_type == 4 else '商铺出售'
        try:
            resp_dict = json.loads(response.body.decode())
            assert resp_dict['code'] == 100000, \
                '{}-{}-{}-{}-第{}页 商铺列表响应code出错'.format(city_name, district_name, shangquan_name, category,
                                                       page_num + 1)
            data_dict = resp_dict.get('data', dict())
            case_li = data_dict.get('docs', list())
            # BUGFIX: default to 0 when 'total' is absent — int(None) used to
            # raise here and discard every listing already on the page.
            total_num = int(data_dict.get('total') or 0)
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{}-第{}页 商铺列表提取出错，msg：{}'.format(city_name, district_name, shangquan_name, category,
                                                          page_num + 1, e))
        else:
            if case_li:
                # Shop listing extraction
                for case_dict in case_li:
                    item_sp = ShopsItem()
                    item_sp['provinceName'] = province_name  # province
                    item_sp['cityName'] = city_name  # city
                    item_sp['projectId'] = case_dict.get('resblock_id', None)  # estate id
                    item_sp['name'] = case_dict.get('resblock_name', None)  # estate name
                    item_sp['buildingName'] = case_dict.get('building_name', None)  # building name
                    item_sp['districtName'] = case_dict.get('district_name', None)  # district
                    item_sp['shangQuan'] = case_dict.get('bizcircle_name', None)  # business circle
                    item_sp['category'] = category  # listing category
                    location = case_dict.get('geo', None)
                    item_sp['lng'], item_sp['lat'] = self.get_lng_and_lat(location)  # longitude, latitude
                    item_sp['title'] = case_dict.get('title', None)  # listing title
                    item_sp['buildArea'] = case_dict.get('area', None)  # floor area
                    item_sp['decorationState'] = case_dict.get('fitment_name', None)  # decoration state
                    item_sp['caseId'] = case_dict.get('house_id', None)  # listing id
                    item_sp['houseAdvantages'] = case_dict.get('feature', None)  # selling points
                    item_sp['tag'] = case_dict.get('tags', None)  # listing description
                    item_sp['floorLevels'] = None  # exact floor: not exposed by this API
                    type_num = case_dict.get('floor_position_type', None)
                    item_sp['floorType'] = self.get_floor_type(type_num)  # floor band (high/mid/low)
                    stamp = case_dict.get('ctime', None)
                    item_sp['listingDate'] = self.transfer_date(stamp)  # listing date
                    item_sp['address'] = case_dict.get('street_name', None)  # address
                    if business_type == 4:
                        item_sp['monthlyRental'] = case_dict.get('rent_price', None)  # monthly rent
                        item_sp['dailyRental'] = case_dict.get('unit_day_rent_price', None)  # daily rent
                    else:
                        item_sp['totalPrice'] = case_dict.get('sell_price', None)  # total price
                        item_sp['price'] = case_dict.get('sell_unit_price', None)  # unit price
                    yield item_sp
            else:
                logger.warning(
                    '{}-{}-{}-{}-第{}页 商铺列表为空'.format(city_name, district_name, shangquan_name, category, page_num + 1))

            # Pagination: ceil(total_num / 20) pages of 20 listings each.
            total_page = -(-total_num // 20)
            if page_num == 0:
                # Shop rent vs. shop sale endpoint/referer.
                if business_type == 4:
                    sp_url = self.sp_rent_url
                    sp_headers = {'Referer': 'https://shangye.ke.com/{}/sp_rent.html'.format(city_py)}
                else:
                    sp_url = self.sp_sell_url
                    sp_headers = {'Referer': 'https://shangye.ke.com/{}/sp_buy.html'.format(city_py)}
                param_str_1 = '?platform=1&device=1&page={}&city_id={}&size=20&big_bizcircle_id=&district_id={}&bizcircle_id%5B0%5D={}&subway_id=&resblock_id=&query_content=&sorter=11&bigbizcircle_switch=1&business_type={}'
                param_str_2 = '?platform=1&device=1&page={}&city_id={}&size=20&big_bizcircle_id=&district_id={}&subway_id=&resblock_id=&query_content=&sorter=11&bigbizcircle_switch=1&business_type={}'
                for next_page_num in range(1, total_page):
                    sp_param = param_str_1.format(next_page_num, city_id, district_id, shangquan_id, business_type) \
                        if shangquan_id else param_str_2.format(next_page_num, city_id, district_id, business_type)
                    yield scrapy.Request(
                        sp_url + sp_param,
                        headers=sp_headers,
                        callback=self.parse_sp_li,
                        meta=dict(city_name=city_name, city_id=city_id, district_id=district_id,
                                  shangquan_id=shangquan_id, city_py=city_py, business_type=business_type,
                                  province_name=province_name, page_num=next_page_num, district_name=district_name,
                                  shangquan_name=shangquan_name, ),
                        dont_filter=True,
                        priority=20,
                    )

    def get_crawl_or_filter_cities(self) -> tuple:
        """Resolve the configured city list and its mode.

        Command-line arguments (``-a crawl=`` / ``-a filter=``) take
        precedence over project settings; with nothing configured the whole
        country is crawled.

        :return: (['重庆', '成都', '武汉'], 1) — mode 1 is a crawl list,
            mode 2 a filter (exclusion) list
        """
        crawl_arg = getattr(self, 'CRAWL_CITIES', None)
        filter_arg = getattr(self, 'FILTER_CITIES', None)
        crawl_cfg = self.settings.get('CRAWL_CITIES', None)
        filter_cfg = self.settings.get('FILTER_CITIES', None)
        if crawl_arg:
            return self.check_params(crawl_arg), 1
        if filter_arg:
            return self.check_params(filter_arg), 2
        if crawl_cfg:
            return self.check_params(crawl_cfg), 1
        if filter_cfg:
            return self.check_params(filter_cfg), 2
        return ['全国'], 1

    def check_params(self, params) -> list:
        """
        爬取城市参数校验和处理
        :param params:
        :return: ['重庆', '成都', '武汉']
        """
        regex_1 = re.compile('，|、|；|;')
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                try:
                    params = regex_1.sub(',', params).split(',')
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl fangduoduo -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                try:
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    def get_city_id(self, city_name):
        """Look up the administrative-division id for a city.

        Beike's ids differ from the official (民政局) codes for a handful of
        cities, which are hard-coded; all other cities are resolved against
        the division table ``self.pcd_df`` (a pandas DataFrame populated
        elsewhere — presumably by an extension/pipeline; confirm).

        :param city_name: Chinese city name, e.g. '重庆'
        :return: division id, or None when the lookup fails or is ambiguous
        """
        # Beike ids that disagree with the official division table.
        special_ids = {
            '济南': 370101,
            '西安': 610100,
            '大连': 210200,
            '长沙': 430100,
            '南昌': 360100,
            '中山': 442000,
        }
        try:
            city_id = special_ids.get(city_name)
            if city_id is None:
                city_df = self.pcd_df.loc[self.pcd_df.name.str.contains(city_name)]
                # BUGFIX: was a stray debug print(); route through the logger.
                logger.debug(city_df)
                city_li = city_df.to_dict(orient='records')
                # Refuse ambiguous matches (name contained in several rows).
                assert len(city_li) == 1
                city_id = city_li[0]['id']
        except Exception:
            return None
        else:
            return city_id

    @staticmethod
    def get_lng_and_lat(location):
        """
        提取经纬度
        :param location:
        :return:
        """
        try:
            ret = location.split(',')
            lng = float(ret[1])
            lat = float(ret[0])
        except:
            return None, None
        else:
            return lng, lat

    @staticmethod
    def get_floor_type(type_num):
        """
        获取楼层类型
        :param type_num:
        :return:
        """
        type_dict = {
            1: '高层',
            2: '中层',
            3: '低层',
        }
        try:
            type_num = int(type_num) if type_num and isinstance(type_num, str) else type_num
            ret = type_dict.get(type_num, None)
        except:
            return None
        else:
            return ret

    @staticmethod
    def transfer_date(stamp):
        """
        将时间戳转换为date
        :param stamp:
        :return:
        """
        try:
            stamp = int(stamp) if isinstance(stamp, str) else stamp
            date_obj = datetime.datetime.fromtimestamp(stamp, None)
            date_str = date_obj.strftime("%Y-%m-%d")
        except:
            pass
        else:
            return date_str
