# -*- coding: utf-8 -*-
import scrapy
import logging
import re
import pandas as pd
from HifoEsf.utils.m_anjuke_cities import ANJUKE_CITY_LIST
from urllib.parse import quote
from copy import deepcopy, copy
import json
from HifoEsf.utils.m_anjuke_font import font_analyse
from HifoEsf.items import BuildingItem, BuildingUnitItem

logger = logging.getLogger(__name__)


class AnjukeBuildingSpider(scrapy.Spider):
    """Crawl building / unit / floor structure for second-hand-house
    communities on the Anjuke mobile site, driven by community records
    already stored in MongoDB (db ``newhours``, col ``anjuke_esf_community``).
    """
    name = 'anjuke_building'
    allowed_domains = ['m.anjuke.com']
    # start_urls = ['https://m.anjuke.com/yezhu/across/city-select']
    # NOTE(review): entry URL is an IP-echo service, presumably a cheap
    # proxy/connectivity check -- the real crawl is driven from parse().
    start_urls = ['http://icanhazip.com']
    # GET: community meta info (city_id, community_id)
    community_type_temp = 'https://m.anjuke.com/landlord/cross/manage/getCommunityInfo?city_id={}&community_id={}'  # GET
    # GET: community search by keyword (keyword, q, city_id)
    search_community_temp = 'https://m.anjuke.com/landlord/cross/community/searchCommunity?keyword={}&limit=15&cityId=20&latitude=&longitude=&page=1&q={}&city_id={}'  # GET
    # referer sent with community-search requests (cityId, cityName)
    search_community_refer = 'https://m.anjuke.com/yezhu/across/community/search?title=%E9%80%89%E6%8B%A9%E5%B0%8F%E5%8C%BA&from=publish-page&quick_publish=0&create_comm=0&cityId={}&cityName={}&locationCityId='
    # POST: building/unit/floor/house search endpoint
    search_building_url = 'https://m.anjuke.com/landlord/cross/v1/communities/buildings'  # POST
    # referer sent with building requests (community_id, city_name)
    # fix: was 'city_name{}' (missing '='), cf. 'cityId={}' above
    search_building_refer = 'https://m.anjuke.com/yezhu/across/fullscreen-modal?city_id={}&quick_publish=0&city_name={}&title=%E9%80%89%E6%8B%A9%E6%A5%BC%E6%A0%8B%E6%88%B7%E5%AE%A4%E5%8F%B7&show_house_number=1'
    custom_settings = {
        'CONCURRENT_REQUESTS': 3,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,  # use proxies from redis; requires RedisConnPipeline to be enabled too
            'HifoEsf.middlewares.AnjukeCookiesMiddleware': 546,
        },
        'ITEM_PIPELINES': {
            'HifoEsf.pipelines.RedisConnPipeline': 299,  # provides the redis connection for XXXProxyMiddleware
            'HifoEsf.pipelines.AnjukeBdPipeline': 301,
            'HifoEsf.pipelines.MongoClientPipeline': 399,
        },
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 404, 408, 407, 302],
        'RETRY_TIMES': 20,
        'CASE_ID_FILTER': True,  # whether to deduplicate by case id
        'USER_AGENTS': [
            "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1",
            "Mozilla/5.0 (Linux; Android 8.0.0; SM-G955U Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Mobile Safari/537.36",
            "Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Mobile Safari/537.36"
        ]
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """Capture the city-selection CLI arguments before normal spider init.

        :param crawl: cities to crawl (``-a crawl=...``)
        :param filter: cities to exclude (``-a filter=...``); the name is part
            of the CLI contract even though it shadows the builtin
        """
        # keep the raw arguments; parsing happens in get_crawl_or_filter_cities
        self.CRAWL_CITIES, self.FILTER_CITIES = crawl, filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """
        Entry point: validate the crawl/filter city arguments, resolve them to
        city metadata and yield the per-community search requests.

        :param response: response for ``start_urls`` (used only as a trigger;
            its body is not inspected)
        :return: generator of community-search requests
        """
        try:
            # validate spider arguments; ``list_type`` renamed from ``type``
            # which shadowed the builtin
            cities_li, list_type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # validation failed: close the spider with the assertion reason
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # unexpected validation error: close the spider
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # validation passed: map city names to crawl metadata
            if not cities_li:
                logger.error('安居客二手房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '安居客二手房城市url列表匹配为空')
                return
            cities_df = pd.DataFrame(ANJUKE_CITY_LIST)
            if list_type == 1:
                # crawl list; '全国' (nationwide) selects every known city
                if '全国' in cities_li:
                    crawl_city_li = cities_df.to_dict(orient='records')
                else:
                    crawl_city_li = cities_df.loc[cities_df.name.isin(cities_li)].to_dict(orient='records')
            else:
                # filter list; excluding '全国' would leave nothing to crawl
                if '全国' in cities_li:
                    crawl_city_li = list()
                    self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                else:
                    crawl_city_li = cities_df.loc[~cities_df.name.isin(cities_li)].to_dict(orient='records')
            del cities_df
            # build the per-city community requests
            for esf_request in self.create_city_request(crawl_city_li):
                yield esf_request

    def create_city_request(self, crawl_city_li):
        """
        构造城市二手房楼盘请求 -- build one community-search request per stored
        community of every selected city.

        :param crawl_city_li: list of city dicts (must contain ``name`` and
            ``city_id``) used to build the requests
        :return: generator of :class:`scrapy.Request`
        """
        # explicit emptiness check: the old ``assert`` + bare ``except`` is
        # stripped under ``python -O`` and also swallowed unrelated errors
        if not crawl_city_li:
            self.crawler.engine.close_spider(self, '安居客二手房城市爬取列表为空')
            return
        self.clo.set_db('newhours')
        self.clo.set_col('anjuke_esf_community')
        # fetch stored communities for each city
        for crawl_city_dict in crawl_city_li:
            city_name = crawl_city_dict['name']
            city_id = crawl_city_dict['city_id']
            # mongo projection: only the fields needed downstream
            field_dict = {'_id': 0, 'provinceName': 1, 'cityName': 1, 'name': 1, 'guid': 1, 'districtName': 1,
                          'address': 1}
            community_li = list(self.clo._collection.find({'cityName': city_name}, field_dict))
            for community_dict in community_li:
                # skip '周边' (surrounding-area) pseudo districts
                if '周边' in community_dict['districtName']:
                    continue
                community_name = community_dict['name']
                # query request for the community search list
                headers = {
                    'referer': self.search_community_refer.format(city_id, quote(city_name))
                }
                community_dict['city_id'] = city_id
                yield scrapy.Request(
                    self.search_community_temp.format(quote(community_name), quote(community_name), city_id, ),
                    headers=headers,
                    callback=self.parse_community_list,
                    meta=dict(community_dict=deepcopy(community_dict), ),
                    dont_filter=True,
                    priority=10,
                )
            del community_li

    def parse_community_list(self, response):
        """
        获取楼盘列表 -- match the stored community against the site's search
        hits and issue the first structure request according to the site's
        community numbering type: 1/2 -> building list, 3 -> unit list,
        4 -> floor list directly (community has no building/unit hierarchy).

        :param response: JSON response of ``search_community_temp``
        :return: generator of requests and items
        """
        community_dict = copy(response.meta['community_dict'])
        try:
            resp = json.loads(response.body.decode())
            data_list = resp['data']['list']
            assert len(data_list), '{}-{}-{} 楼盘查询为空'.format(community_dict['cityName'], community_dict['districtName'],
                                                            community_dict['name'])
        except Exception as e:
            logger.error('{}-{}-{} 楼盘查询出错,error:{}'.format(community_dict['cityName'], community_dict['districtName'],
                                                           community_dict['name'], e))
        else:
            data_df = pd.DataFrame(data_list)
            # strict match first: name + district + address
            try:
                community_ret = data_df.loc[
                    (data_df.name == community_dict['name']) & (data_df.area_name == community_dict['districtName']) & (
                            data_df.address == community_dict['address'])].to_dict(orient='records')
                assert len(community_ret)
            except Exception:
                # fallback 1: match on the site's search_name instead of name
                try:
                    community_ret = data_df.loc[(data_df.search_name == community_dict['name']) & (
                            data_df.area_name == community_dict['districtName']) & (
                                                        data_df.address == community_dict['address'])].to_dict(
                        orient='records')
                    assert len(community_ret)
                except Exception:
                    # fallback 2: drop the address constraint
                    community_ret = data_df.loc[(data_df.name == community_dict['name']) & (
                            data_df.area_name == community_dict['districtName'])].to_dict(orient='records')
            if not len(community_ret):
                # fix: was logging.warning (root logger) -- use module logger
                logger.warning('{}-{}-{} 楼盘匹配为空'.format(community_dict['cityName'], community_dict['districtName'],
                                                        community_dict['name']))
                return
            if len(community_ret) > 1:
                logger.warning(
                    '{}-{}-{} 楼盘匹配不唯一'.format(community_dict['cityName'], community_dict['districtName'],
                                              community_dict['name']))
            # first match wins
            community_dict['community_id'] = community_ret[0]['community_id']
            community_dict['community_number_type'] = community_ret[0]['community_number_type']
            number_type = community_dict['community_number_type']
            page_num = 1
            headers = {
                'content-type': 'application/json',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                'referer': self.search_building_refer.format(community_dict['community_id'],
                                                             quote(community_dict['cityName'])),
            }
            if number_type in (1, 2):
                # types 1 and 2 were two byte-identical branches: merged
                condition_dict = {
                    'bizType': 'BUILDING',
                    'buildingOpenId': '',
                    'communityId': community_dict['community_id'],
                    'floorOpenId': '',
                    'keyword': '',
                    'unitOpenId': '',
                }
                data_dict = {
                    'condition': condition_dict,
                    'pageNum': page_num,
                    'pageSize': 50,
                    'type': 'BUILDING',
                }
                yield scrapy.Request(
                    self.search_building_url,
                    method='POST',
                    headers=headers,
                    body=json.dumps(data_dict),
                    callback=self.parse_building_li,
                    meta=dict(community_dict=deepcopy(community_dict), page_num=deepcopy(page_num)),
                    dont_filter=True,
                    priority=20,
                )
            elif number_type == 3:
                # type 3: the top-level entries are units, not buildings
                condition_dict = {
                    'bizType': 'UNIT',
                    'buildingOpenId': '',
                    'communityId': community_dict['community_id'],
                    'floorOpenId': '',
                    'keyword': '',
                    'unitOpenId': '',
                }
                data_dict = {
                    'condition': condition_dict,
                    'pageNum': page_num,
                    'pageSize': 50,
                    'type': 'UNIT',
                }
                yield scrapy.Request(
                    self.search_building_url,
                    method='POST',
                    headers=headers,
                    body=json.dumps(data_dict),
                    callback=self.parse_building_li,
                    meta=dict(community_dict=deepcopy(community_dict), page_num=deepcopy(page_num)),
                    dont_filter=True,
                    priority=20,
                )
            elif number_type == 4:
                # type 4: no building/unit hierarchy -- fabricate one pseudo
                # building and one pseudo unit ('1单元') for the whole community
                item_bd = BuildingItem()
                item_bd['provinceName'] = community_dict['provinceName']  # province
                item_bd['cityName'] = community_dict['cityName']  # city
                item_bd['districtName'] = community_dict['districtName']  # district
                item_bd['cityId'] = community_dict['city_id']  # city id
                item_bd['name'] = community_dict['name']  # community name
                item_bd['address'] = community_dict['address']  # community address
                item_bd['guid'] = community_dict['guid']  # community guid
                item_bd['other'] = community_dict['community_id']  # site community id
                item_bd['buildingId'] = str(community_dict['community_id'])  # building id (reuses community id)
                item_bd['buildingName'] = community_dict['name']  # building name
                item_bd['unitCount'] = 1  # unit count
                item_bd['unitSet'] = ['1单元']  # unit names
                yield item_bd

                item_unit = BuildingUnitItem()
                item_unit['provinceName'] = item_bd['provinceName']  # province
                item_unit['cityName'] = item_bd['cityName']  # city
                item_unit['districtName'] = item_bd['districtName']  # district
                item_unit['name'] = item_bd['name']  # community name
                item_unit['guid'] = item_bd['guid']  # community guid
                item_unit['other'] = item_bd['other']  # site community id
                item_unit['buildingId'] = item_bd['buildingId']  # building id
                item_unit['buildingName'] = item_bd['buildingName']  # building name
                item_unit['unitId'] = None  # unit id (no real unit exists)
                item_unit['unitName'] = '1单元'  # unit name
                condition_dict = {
                    'bizType': 'FLOOR',
                    'buildingOpenId': '',
                    'communityId': community_dict['community_id'],
                    'floorOpenId': '',
                    'keyword': '',
                    'unitOpenId': '',
                }
                data_dict = {
                    'condition': condition_dict,
                    'pageNum': page_num,
                    'pageSize': 50,
                    'type': 'FLOOR',
                }
                yield scrapy.Request(
                    self.search_building_url,
                    method='POST',
                    headers=headers,
                    body=json.dumps(data_dict),
                    callback=self.parse_floor_li,
                    meta=dict(item_unit=deepcopy(item_unit),
                              community_number_type=deepcopy(number_type), ),
                    dont_filter=True,
                    priority=40,
                )
            else:
                logger.error(
                    '{}-{}-{} 楼盘类型异常，error：{}'.format(community_dict['cityName'], community_dict['districtName'],
                                                      community_dict['name'],
                                                      community_dict['community_number_type']))

    def parse_building_li(self, response):
        """
        获取楼栋列表 -- parse one page of the building list.

        Emits a ``BuildingItem`` per building and, depending on the community
        numbering type, requests the unit list (type 1) or fabricates a single
        '1单元' unit and requests its floor list (types 2/3).  Remaining pages
        are scheduled from the first page only.

        :param response: JSON response of ``search_building_url``
        :return: generator of requests and items
        """
        community_dict = copy(response.meta['community_dict'])
        page_num = copy(response.meta['page_num'])
        try:
            resp = json.loads(response.body.decode())
            data_list = resp['data']['results']
            assert len(data_list), '{}-{}-{}-第{}页 楼栋列表获取为空'.format(community_dict['cityName'],
                                                                   community_dict['districtName'],
                                                                   community_dict['name'], page_num)
            font_body = resp['data']['fontInfo']['woffFontBody']
            total_num = resp['data']['total']
            page_size = resp['data']['pageSize']
        except AssertionError as e:
            logger.warning(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-第{}页 楼栋列表解析出错,error:{}'.format(community_dict['cityName'], community_dict['districtName'],
                                                         community_dict['name'], page_num, e))
        else:
            font_dict = font_analyse(font_body)
            number_type = community_dict['community_number_type']
            # loop-invariant: build the headers once (was rebuilt per building)
            headers = {
                'content-type': 'application/json',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                'referer': self.search_building_refer.format(community_dict['community_id'],
                                                             quote(community_dict['cityName'])),
            }

            def _pseudo_unit(bd):
                # single fabricated '1单元' unit for a building without real units
                unit = BuildingUnitItem()
                unit['provinceName'] = bd['provinceName']  # province
                unit['cityName'] = bd['cityName']  # city
                unit['districtName'] = bd['districtName']  # district
                unit['name'] = bd['name']  # community name
                unit['guid'] = bd['guid']  # community guid
                unit['other'] = bd['other']  # site community id
                unit['buildingId'] = bd['buildingId']  # building id
                unit['buildingName'] = bd['buildingName']  # building name
                unit['unitId'] = None  # unit id (no real unit exists)
                unit['unitName'] = '1单元'  # unit name
                return unit

            # renamed loop variable: the old code reused ``data_dict`` for the
            # POST payload, shadowing the current building record
            for bd_dict in data_list:
                item_bd = BuildingItem()
                item_bd['provinceName'] = community_dict['provinceName']  # province
                item_bd['cityName'] = community_dict['cityName']  # city
                item_bd['districtName'] = community_dict['districtName']  # district
                item_bd['cityId'] = community_dict['city_id']  # city id
                item_bd['name'] = community_dict['name']  # community name
                item_bd['address'] = community_dict['address']  # community address
                item_bd['guid'] = community_dict['guid']  # community guid
                item_bd['other'] = community_dict['community_id']  # site community id
                item_bd['buildingId'] = bd_dict['bizOpenId']  # building id
                # building name arrives font-obfuscated; decode via the font map
                item_bd['buildingName'] = self.deal_name(font_dict, bd_dict['encryptBizName'], 1)
                if number_type == 1:
                    # type 1: buildings have real units -> fetch the unit list
                    condition_dict = {
                        'bizType': 'UNIT',
                        'buildingOpenId': item_bd['buildingId'],
                        'communityId': item_bd['other'],
                        'floorOpenId': '',
                        'keyword': '',
                        'unitOpenId': '',
                    }
                    payload = {
                        'condition': condition_dict,
                        'pageNum': 1,
                        'pageSize': 50,
                        'type': 'UNIT',
                    }
                    yield scrapy.Request(
                        self.search_building_url,
                        method='POST',
                        headers=headers,
                        body=json.dumps(payload),
                        callback=self.parse_unit_li,
                        meta=dict(item_bd=deepcopy(item_bd),
                                  community_number_type=deepcopy(number_type), ),
                        dont_filter=True,
                        priority=30,
                    )
                elif number_type == 2:
                    # type 2: no units -> floors hang off the building id
                    item_bd['unitCount'] = 1  # unit count
                    item_bd['unitSet'] = ['1单元']  # unit names
                    yield item_bd

                    item_unit = _pseudo_unit(item_bd)
                    condition_dict = {
                        'bizType': 'FLOOR',
                        'buildingOpenId': item_unit['buildingId'],
                        'communityId': item_unit['other'],
                        'floorOpenId': '',
                        'keyword': '',
                        'unitOpenId': '',
                    }
                    payload = {
                        'condition': condition_dict,
                        'pageNum': 1,
                        'pageSize': 50,
                        'type': 'FLOOR',
                    }
                    yield scrapy.Request(
                        self.search_building_url,
                        method='POST',
                        headers=headers,
                        body=json.dumps(payload),
                        callback=self.parse_floor_li,
                        meta=dict(item_unit=deepcopy(item_unit),
                                  community_number_type=deepcopy(number_type), ),
                        dont_filter=True,
                        priority=40,
                    )
                elif number_type == 3:
                    # type 3: the 'building' open id is really a unit open id
                    item_bd['unitCount'] = 1  # unit count
                    item_bd['unitSet'] = ['1单元']  # unit names
                    yield item_bd

                    item_unit = _pseudo_unit(item_bd)
                    condition_dict = {
                        'bizType': 'FLOOR',
                        'buildingOpenId': '',
                        'communityId': item_unit['other'],
                        'floorOpenId': '',
                        'keyword': '',
                        'unitOpenId': item_unit['buildingId'],
                    }
                    payload = {
                        'condition': condition_dict,
                        'pageNum': 1,
                        'pageSize': 50,
                        'type': 'FLOOR',
                    }
                    yield scrapy.Request(
                        self.search_building_url,
                        method='POST',
                        headers=headers,
                        body=json.dumps(payload),
                        callback=self.parse_floor_li,
                        meta=dict(item_unit=deepcopy(item_unit),
                                  community_number_type=deepcopy(number_type), ),
                        dont_filter=True,
                        priority=40,
                    )

            # pagination: only the first page schedules the rest
            if page_num == 1:
                total_page = total_num // page_size + 1 if total_num % page_size else total_num // page_size
                for next_page_num in range(2, total_page + 1):
                    if number_type == 1 or number_type == 2:
                        condition_dict = {
                            'bizType': 'BUILDING',
                            'buildingOpenId': '',
                            'communityId': community_dict['community_id'],
                            'floorOpenId': '',
                            'keyword': '',
                            'unitOpenId': '',
                        }
                        payload = {
                            'condition': condition_dict,
                            'pageNum': next_page_num,
                            'pageSize': 50,
                            'type': 'BUILDING',
                        }
                    else:
                        condition_dict = {
                            'bizType': 'UNIT',
                            'buildingOpenId': '',
                            'communityId': community_dict['community_id'],
                            'floorOpenId': '',
                            'keyword': '',
                            'unitOpenId': '',
                        }
                        payload = {
                            'condition': condition_dict,
                            'pageNum': next_page_num,
                            'pageSize': 50,
                            'type': 'UNIT',
                        }
                    yield scrapy.Request(
                        self.search_building_url,
                        method='POST',
                        headers=headers,
                        body=json.dumps(payload),
                        callback=self.parse_building_li,
                        meta=dict(community_dict=deepcopy(community_dict), page_num=deepcopy(next_page_num)),
                        dont_filter=True,
                        priority=20,
                    )

    def parse_unit_li(self, response):
        """
        获取单元列表 -- parse the unit list of one building.

        Requests every unit's floor list, then yields the building item with
        its unit count and unit names filled in.

        :param response: JSON response of ``search_building_url`` (UNIT query)
        :return: generator of requests and items
        """
        item_bd = copy(response.meta['item_bd'])
        community_number_type = copy(response.meta['community_number_type'])
        try:
            resp = json.loads(response.body.decode())
            data_list = resp['data']['results']
            assert len(data_list), '{}-{}-{}-{} 单元列表获取为空'.format(item_bd['cityName'], item_bd['districtName'],
                                                                 item_bd['name'], item_bd['buildingName'])
            font_body = resp['data']['fontInfo']['woffFontBody']
        except AssertionError as e:
            logger.warning(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{} 单元列表解析出错，error:{}'.format(item_bd['cityName'], item_bd['districtName'], item_bd['name'],
                                                       item_bd['buildingName'], e))
        else:
            font_dict = font_analyse(font_body)
            unit_li = list()
            # loop-invariant: build the headers once (was rebuilt per unit)
            headers = {
                'content-type': 'application/json',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                'referer': self.search_building_refer.format(item_bd['other'], quote(item_bd['cityName'])),
            }
            # renamed loop variable: the old code reused ``data_dict`` for the
            # POST payload, shadowing the current unit record
            for unit_dict in data_list:
                item_unit = BuildingUnitItem()
                item_unit['provinceName'] = item_bd['provinceName']  # province
                item_unit['cityName'] = item_bd['cityName']  # city
                item_unit['districtName'] = item_bd['districtName']  # district
                item_unit['name'] = item_bd['name']  # community name
                item_unit['guid'] = item_bd['guid']  # community guid
                item_unit['other'] = item_bd['other']  # site community id
                item_unit['buildingId'] = item_bd['buildingId']  # building id
                item_unit['buildingName'] = item_bd['buildingName']  # building name
                item_unit['unitId'] = unit_dict['bizOpenId']  # unit id
                # unit name arrives font-obfuscated; decode via the font map
                item_unit['unitName'] = self.deal_name(font_dict, unit_dict['encryptBizName'], 2)
                unit_li.append(item_unit['unitName'])
                # request this unit's floor list
                condition_dict = {
                    'bizType': 'FLOOR',
                    'buildingOpenId': item_unit['buildingId'],
                    'communityId': item_unit['other'],
                    'floorOpenId': '',
                    'keyword': '',
                    'unitOpenId': item_unit['unitId'],
                }
                payload = {
                    'condition': condition_dict,
                    'pageNum': 1,
                    'pageSize': 50,
                    'type': 'FLOOR',
                }
                yield scrapy.Request(
                    self.search_building_url,
                    method='POST',
                    headers=headers,
                    body=json.dumps(payload),
                    callback=self.parse_floor_li,
                    meta=dict(item_unit=deepcopy(item_unit), community_number_type=deepcopy(community_number_type), ),
                    dont_filter=True,
                    priority=40,
                )

            item_bd['unitCount'] = len(data_list)  # unit count
            item_bd['unitSet'] = unit_li  # unit names
            yield item_bd

    def parse_floor_li(self, response):
        """
        Parse the floor list of one building/unit and request its room numbers.

        Floor names arrive obfuscated with a per-response woff font;
        ``font_analyse`` builds the glyph mapping that ``deal_name`` uses to
        decode them.  The total floor count is derived from the first/last
        decoded floors, stored on the unit item, and then a POST request for
        page 1 of the *middle* floor's room-number list is issued
        (handled by ``parse_house_li``).

        :param response: JSON response of ``search_building_url`` (floor query)
        :return: yields a scrapy.Request for the room-number list, or nothing
                 when the floor list is empty or fails to parse
        """
        item_unit = copy(response.meta['item_unit'])
        community_number_type = copy(response.meta['community_number_type'])
        try:
            resp = json.loads(response.body.decode())
            data_list = resp['data']['results']
            assert len(data_list), '{}-{}-{}-{}-{} 楼层列表获取为空'.format(item_unit['cityName'], item_unit['districtName'],
                                                                    item_unit['name'], item_unit['buildingName'],
                                                                    item_unit['unitName'])
            font_body = resp['data']['fontInfo']['woffFontBody']
            total_num = resp['data']['total']
            page_size = resp['data']['pageSize']
            font_dict = font_analyse(font_body)
            # decode the obfuscated labels of the first, last and middle floors
            floor_first = int(self.deal_name(font_dict, data_list[0]['encryptBizName'], 3))
            floor_last = int(self.deal_name(font_dict, data_list[-1]['encryptBizName'], 3))
            mid_dict = data_list[(len(data_list) // 2)]
            floor_mid = int(self.deal_name(font_dict, mid_dict['encryptBizName'], 3))
        except AssertionError as e:
            # empty floor list: log and drop this unit
            logger.warning(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{}-{} 楼层列表解析出错，error:{}'.format(item_unit['cityName'], item_unit['districtName'],
                                                          item_unit['name'], item_unit['buildingName'],
                                                          item_unit['unitName'], e))
        else:
            total_floor = self.get_total_floor(floor_first, floor_last, total_num, page_size)
            item_unit['totalFloor'] = total_floor  # total number of floors
            item_unit['floorStart'] = floor_first  # lowest floor number
            # top floor: floor_last only covers page 1, so total_floor is used instead
            item_unit['floorEnd'] = total_floor
            # build the request for page 1 of the middle floor's room-number list
            page_num = 1
            headers = {
                'content-type': 'application/json',
                'accept-encoding': 'gzip, deflate, br',
                'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                'referer': self.search_building_refer.format(item_unit['other'], quote(item_unit['cityName'])),
            }
            # NOTE(review): community_number_type seems to select the numbering
            # scheme — 1/2 address by building + unit id, 3 carries the building
            # id in unitOpenId, anything else queries by community + floor only;
            # confirm against the upstream code that sets it
            if community_number_type == 1 or community_number_type == 2:
                condition_dict = {
                    'bizType': 'HOUSE',
                    'buildingOpenId': item_unit['buildingId'],
                    'communityId': item_unit['other'],
                    'floorOpenId': mid_dict['bizOpenId'],
                    'keyword': '',
                    'unitOpenId': item_unit['unitId'] if item_unit['unitId'] else '',
                }
            elif community_number_type == 3:
                condition_dict = {
                    'bizType': 'HOUSE',
                    'buildingOpenId': '',
                    'communityId': item_unit['other'],
                    'floorOpenId': mid_dict['bizOpenId'],
                    'keyword': '',
                    'unitOpenId': item_unit['buildingId'],
                }
            else:
                condition_dict = {
                    'bizType': 'HOUSE',
                    'buildingOpenId': '',
                    'communityId': item_unit['other'],
                    'floorOpenId': mid_dict['bizOpenId'],
                    'keyword': '',
                    'unitOpenId': '',
                }
            data_dict = {
                'condition': condition_dict,
                'pageNum': page_num,
                'pageSize': 50,
                'type': 'HOUSE',
            }
            yield scrapy.Request(
                self.search_building_url,
                method='POST',
                headers=headers,
                body=json.dumps(data_dict),
                callback=self.parse_house_li,
                meta=dict(item_unit=deepcopy(item_unit), floor_mid=deepcopy(floor_mid),
                          floor_open_id=deepcopy(mid_dict['bizOpenId']), page_num=deepcopy(page_num),
                          community_number_type=deepcopy(community_number_type), ),
                dont_filter=True,
                priority=50,
            )

    def parse_house_li(self, response):
        """
        Parse one page of the middle floor's room-number list.

        Decodes the obfuscated room names via the custom woff font and
        accumulates them in ``room_li`` across pages.  When the last page has
        been consumed, the finished unit item (standard-floor room names and
        count) is yielded; otherwise the next page is requested with the same
        query condition that ``parse_floor_li`` used for page 1.

        :param response: JSON response of ``search_building_url`` (house query)
        :return: yields either the next-page scrapy.Request or the unit item
        """
        item_unit = copy(response.meta['item_unit'])
        floor_mid = copy(response.meta['floor_mid'])
        floor_open_id = copy(response.meta['floor_open_id'])
        page_num = copy(response.meta['page_num'])
        community_number_type = copy(response.meta['community_number_type'])
        # page 1 starts a fresh accumulator; later pages continue the one from meta
        room_li = list() if page_num == 1 else copy(response.meta['room_li'])
        try:
            resp = json.loads(response.body.decode())
            data_list = resp['data']['results']
            assert len(data_list), '{}-{}-{}-{}-第{}层 房号列表获取为空'.format(item_unit['cityName'], item_unit['name'],
                                                                      item_unit['buildingName'], item_unit['unitName'],
                                                                      floor_mid, )
            font_body = resp['data']['fontInfo']['woffFontBody']
            total_num = resp['data']['total']
            page_size = resp['data']['pageSize']
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error('{}-{}-{}-{}-第{}层 房号列表提取出错，error:{}'.format(item_unit['cityName'], item_unit['name'],
                                                                     item_unit['buildingName'], item_unit['unitName'],
                                                                     floor_mid, e, ))
        else:
            font_dict = font_analyse(font_body)
            for data_dict in data_list:
                room_name = self.deal_name(font_dict, data_dict['encryptBizName'], 3)
                room_li.append(room_name)
            # pagination: ceil(total_num / page_size) without math.ceil
            total_page = total_num // page_size + 1 if total_num % page_size else total_num // page_size
            next_page_num = page_num + 1
            if next_page_num <= total_page:
                headers = {
                    'content-type': 'application/json',
                    'accept-encoding': 'gzip, deflate, br',
                    'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                    'referer': self.search_building_refer.format(item_unit['other'], quote(item_unit['cityName'])),
                }
                # keep the query condition identical to the page-1 request built
                # in parse_floor_li, branching on the community numbering scheme
                if community_number_type == 1 or community_number_type == 2:
                    condition_dict = {
                        'bizType': 'HOUSE',
                        'buildingOpenId': item_unit['buildingId'],
                        'communityId': item_unit['other'],
                        'floorOpenId': floor_open_id,
                        'keyword': '',
                        'unitOpenId': item_unit['unitId'] if item_unit['unitId'] else '',
                    }
                elif community_number_type == 3:
                    # scheme 3 carries the building id in unitOpenId
                    condition_dict = {
                        'bizType': 'HOUSE',
                        'buildingOpenId': '',
                        'communityId': item_unit['other'],
                        'floorOpenId': floor_open_id,
                        'keyword': '',
                        'unitOpenId': item_unit['buildingId'],
                    }
                else:
                    # unknown scheme: query by community + floor only
                    # (previously this case fell into the scheme-3 branch, making
                    # pagination inconsistent with the first-page request)
                    condition_dict = {
                        'bizType': 'HOUSE',
                        'buildingOpenId': '',
                        'communityId': item_unit['other'],
                        'floorOpenId': floor_open_id,
                        'keyword': '',
                        'unitOpenId': '',
                    }
                # renamed from data_dict to avoid shadowing the loop variable above
                payload = {
                    'condition': condition_dict,
                    'pageNum': next_page_num,
                    'pageSize': 50,
                    'type': 'HOUSE',
                }
                yield scrapy.Request(
                    self.search_building_url,
                    method='POST',
                    headers=headers,
                    body=json.dumps(payload),
                    callback=self.parse_house_li,
                    # only the mutable values need copying; ints/strings are immutable
                    meta=dict(item_unit=deepcopy(item_unit), floor_mid=floor_mid,
                              floor_open_id=floor_open_id, page_num=next_page_num,
                              community_number_type=community_number_type, room_li=deepcopy(room_li), ),
                    dont_filter=True,
                    priority=50,
                )
            else:
                item_unit['roomNamePerFloor'] = room_li  # standard-floor room names
                item_unit['roomNoPerFloor'] = len(room_li)  # rooms per standard floor
                yield item_unit

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        获取配置参数
        :return: (['重庆', '成都', '武汉'], 1)
        """
        param_1 = getattr(self, 'CRAWL_CITIES', None)
        param_2 = getattr(self, 'FILTER_CITIES', None)
        param_3 = self.settings.get('CRAWL_CITIES', None)
        param_4 = self.settings.get('FILTER_CITIES', None)
        if param_1 or param_2:
            if param_1:
                return self.check_params(param_1), 1  # 1代表配置参数是爬取列表
            else:
                return self.check_params(param_2), 2  # 2代表配置参数是过滤列表
        elif param_3 or param_4:
            if param_3:
                return self.check_params(param_3), 1
            else:
                return self.check_params(param_4), 2
        else:
            return ['全国'], 1

    def check_params(self, params) -> list:
        """
        爬取城市参数校验和处理
        :param params:
        :return: ['重庆', '成都', '武汉']
        """
        regex_1 = re.compile('，|、|；|;')
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                try:
                    params = regex_1.sub(',', params).split(',')
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl anjuke -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                try:
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    def deal_name(self, font_dict, name_str, type):
        """
        解析名称
        :param font_dict:
        :param name_str:
        :param type:
        :return:
        """
        try:
            name_fmt = name_str.replace('\\U000', 'uni')
            for key, value in font_dict.items():
                name_fmt = name_fmt.replace(key, value)
            if type == 1:
                name_fmt = name_fmt + '号' if not self.get_chn_char(name_fmt) else name_fmt
            elif type == 2:
                name_fmt = name_fmt + '单元' if not self.get_chn_char(name_fmt) else name_fmt
            elif type == 3:
                name_fmt = name_fmt
        except:
            return
        else:
            return name_fmt

    @staticmethod
    def get_chn_char(char_str):
        """
        匹配中文
        :param char_str:
        :return:
        """
        regex = re.compile(r'[\u4E00-\u9FFF]+')
        try:
            ret = regex.findall(char_str)
            assert len(ret)
        except:
            return False
        else:
            return True

    @staticmethod
    def get_total_floor(floor_first, floor_last, total_num, page_size):
        """
        获取总楼层
        :param floor_first:
        :param floor_last:
        :param total_num:
        :param page_size: 50
        :return:
        """
        if total_num > page_size:
            if floor_first < 0:
                ret = total_num + floor_first
            elif floor_first == 1:
                ret = total_num
            else:
                ret = total_num + floor_first - 1
        else:
            ret = floor_last
        return ret
