import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
import logging
import urllib.parse
from copy import copy, deepcopy
from HifoEsf.items import CommunityItem, CaseItem
import json
import re
from collections import OrderedDict
from typing import Optional

logger = logging.getLogger(__name__)


class FangduoduoSpider(CrawlSpider):
    """Spider for second-hand housing data on fangdd.com.

    Flow: city page -> district list -> paged community (xiaoqu) lists ->
    community detail -> per-community second-hand case details.
    """
    name = 'fangduoduo'
    allowed_domains = ['fangdd.com', ]
    start_urls = ['https://shanghai.fangdd.com/']
    # POST endpoint for paging through a city's community list; '{}' takes the city pinyin.
    community_li_next_page_url = 'https://{}.fangdd.com/web-api/data/fetchXqList'

    rules = (
        Rule(
            # City links on the landing page; rewritten/filtered by the two hooks below.
            LinkExtractor(restrict_xpaths="//dl[@class='_1bLmw']/dd/a"),
            callback='parse_distrct_li',
            follow=False,
            process_links='process_links_city_li',
            process_request='process_request_city_li',
        ),
    )

    # NOTE(review): these two class attributes appear unused — __init__ stores the same
    # -a arguments under CRAWL_CITIES / FILTER_CITIES instead; confirm before removing.
    crawl: Optional[str] = None
    filter: Optional[str] = None

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        :param crawl: city names to crawl (whitelist), separated by ，、；; or ,
        :param filter: city names to exclude (blacklist); crawl takes precedence
        """
        super(FangduoduoSpider, self).__init__(*args, **kwargs)
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter

    def parse_distrct_li(self, response):
        """
        Parse a city's community page and fan out one page-1 request per district.

        :param response: SSR JSON response for a city's /xiaoqu/ page
        :return: yields POST requests for page 1 of every district's community list
        """
        city_name = response.meta['link_text']
        # Resolve province name/id and city id from the reference table.
        # NOTE(review): self.pcd_df is not assigned anywhere in this file — presumably a
        # province/city/district DataFrame attached elsewhere (e.g. from_crawler); confirm.
        esf_province_name, esf_province_id, esf_city_id = self.get_province_id_and_city_id(city_name, self.pcd_df)
        try:
            resp = json.loads(response.body.decode())
            region_info_dict = resp['commHeader']['regionInfo']
            city_id = region_info_dict.get('id', None)
            city_pinyin = region_info_dict.get('pinYin', None)
            assert city_id and city_pinyin, f'{city_name} city_id和city_pinyin获取出错'
            # The site has not opened community pages for some cities yet.
            city_xq_li = resp['footer']['cityXq']
            assert city_xq_li, f'{city_name} 行政区/县列表获取为空【Tips：房多多网站暂未开放部分城市的小区页面，请仔细核对【{city_name}】是否已开放!】'
        except AssertionError as e:
            logger.warning(e)
        except Exception:
            # NOTE(review): the exception detail is dropped here; consider logging it too.
            logger.error(f'{city_name} 获取行政区/县列表失败')
        else:
            # Iterate districts and build the page-1 community-list POST request for each.
            for city_xq in city_xq_li:
                district_url_str = city_xq['url']
                district_str = city_xq['text']
                # Look up the district id in the reference table.
                esf_district_id = self.get_district_id(city_name, esf_city_id, district_str, self.pcd_df)
                # Build the POST payload for page 1 of this district.
                params_dict = self.create_cmty_li_params(city_id=city_id, page=1, url=district_url_str)
                if params_dict:
                    # Build request headers; the referer mimics the district page URL.
                    referer_url = response.request.url.replace('/xiaoqu/?SSR_JSON=true', district_url_str)
                    headers = {
                        'referer': referer_url,
                        'content-type': 'application/json; charset=utf-8',
                    }
                    yield scrapy.Request(
                        self.community_li_next_page_url.format(city_pinyin),
                        method='POST',
                        headers=headers,
                        body=json.dumps(params_dict),
                        callback=self.parse_community_li,
                        meta=dict(
                            city_name=city_name,
                            city_id=city_id,
                            page_num=1,
                            district_str=deepcopy(district_str),
                            district_url_str=deepcopy(district_url_str),
                            esf_province_name=esf_province_name,
                            esf_province_id=esf_province_id,
                            esf_city_id=esf_city_id,
                            esf_district_id=esf_district_id,
                        ),
                        priority=2,
                    )
                else:
                    logger.error(f'{city_name}-{district_str}-第1页 请求参数构造出错')
            del city_xq_li
            del resp
            del response

    def parse_community_li(self, response):
        """
        Parse one page of a district's community list: emit one detail request per
        community, then schedule the remaining pages of the list.

        :param response: JSON response from the fetchXqList POST endpoint
        :return: yields community-detail GET requests and next-page POST requests
        """
        city_name = copy(response.meta['city_name'])
        city_id = copy(response.meta['city_id'])
        page_num = copy(response.meta['page_num'])
        district_str = copy(response.meta['district_str'])
        district_url_str = copy(response.meta['district_url_str'])
        try:
            resp = json.loads(response.body.decode())
            status_code = resp['code']
            assert status_code == 200, f'{city_name}-{district_str}-第{page_num}页 小区列表状态码出错，{status_code}'
            cmty_li = resp['data']['list']
            assert cmty_li, f'{city_name}-{district_str}-第{page_num}页 小区列表获取为空'
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(f'{city_name}-{district_str}-第{page_num}页 小区列表获取出错，error_msg:{e}')
        else:
            # Extract the list-page fields for every community on this page.
            for cmty_dict in cmty_li:
                item_cmty = CommunityItem()
                item_cmty['provinceName'] = copy(response.meta['esf_province_name'])  # province name
                item_cmty['provinceId'] = copy(response.meta['esf_province_id'])  # province id
                item_cmty['cityName'] = city_name  # city name
                item_cmty['cityId'] = copy(response.meta['esf_city_id'])  # city id
                item_cmty['districtName'] = cmty_dict.get('districtName', None)  # district name
                item_cmty['districtId'] = copy(response.meta['esf_district_id'])  # district id
                item_cmty['name'] = cmty_dict.get('name', None)  # community name
                item_cmty['address'] = cmty_dict.get('address', None)  # community address
                item_cmty['greeningRate'] = cmty_dict.get('greenRate', None)  # greening rate
                item_cmty['shangQuan'] = cmty_dict.get('sectionName', None)  # business district
                item_cmty['propertyCompany'] = cmty_dict.get('propertyCompany', None)  # property-management company
                item_cmty['monthOnMonth'] = cmty_dict.get('priceLastMonthIncPercent', None)  # month-on-month price change
                item_cmty['location'] = self.deal_lng_and_lat(lng=cmty_dict.get('lng', None),
                                                              lat=cmty_dict.get('lat', None))  # [lng, lat]
                item_cmty['buildStructure'] = cmty_dict.get('buildingType', None)  # building type/structure
                item_cmty['price'] = cmty_dict.get('price', None)  # unit price
                item_cmty['communityUrl'] = self.deal_community_url(cmty_dict.get('url', None))  # community detail url
                # NOTE(review): running 'amountRange' through deal_community_url looks like a
                # copy-paste slip — it will almost always store None; confirm the intended value.
                item_cmty['other'] = self.deal_community_url(cmty_dict.get('amountRange', None))  # extra info

                # Request the community detail page.
                if item_cmty['communityUrl']:
                    # Build request headers.
                    headers = {
                        'referer': item_cmty['communityUrl'],
                        'content-type': 'application/json; charset=utf-8',
                    }
                    yield scrapy.Request(
                        url=item_cmty['communityUrl'] + '?SSR_JSON=true',
                        headers=headers,
                        callback=self.parse_community_detail,
                        meta=dict(item_cmty=deepcopy(item_cmty), ),
                        priority=4,
                    )
                else:
                    logger.error('{}-{}-{} 小区详情页url解析出错，url:{}'.format(item_cmty['cityName'], item_cmty['districtName'],
                                                                       item_cmty['name'],
                                                                       cmty_dict.get('url', None)))

            #  Pagination.
            try:
                total_num = int(resp['data']['total'])  # total number of communities
                total_page_num = total_num // 20 + 1 if total_num % 20 else total_num // 20  # total pages (20 per page)
            except Exception as e:
                logger.error(f'{city_name}-{district_str}-第{page_num}页 总页数获取出错，error_msg:{e}')
            else:
                # Schedule every remaining page of this district's community list.
                # NOTE(review): this re-yields pages 2..N from every page's response —
                # presumably the scheduler's dupe filter prevents re-fetching; confirm.
                for next_page_num in range(2, total_page_num + 1):
                    # Build the POST payload for the target page.
                    params_dict = self.create_cmty_li_params(city_id=city_id, page=next_page_num, url=district_url_str)
                    if params_dict:
                        # Build request headers; the referer carries the page number.
                        referer_url = response.request.headers['Referer'].decode() + f'?pageNo={next_page_num}'
                        headers = {
                            'referer': referer_url,
                            'content-type': 'application/json; charset=utf-8',
                        }
                        yield scrapy.Request(
                            response.request.url,
                            method='POST',
                            headers=headers,
                            body=json.dumps(params_dict),
                            callback=self.parse_community_li,
                            meta=dict(
                                city_name=city_name,
                                city_id=city_id,
                                page_num=next_page_num,
                                district_str=deepcopy(district_str),
                                district_url_str=deepcopy(district_url_str),
                                esf_province_name=copy(response.meta['esf_province_name']),
                                esf_province_id=copy(response.meta['esf_province_id']),
                                esf_city_id=copy(response.meta['esf_city_id']),
                                esf_district_id=copy(response.meta['esf_district_id']),
                            ),
                            priority=3,
                        )
                    else:
                        logger.error(f'{city_name}-{district_str}-第{next_page_num}页 请求参数构造出错')
            del cmty_li
            del resp
            del response

    def parse_community_detail(self, response):
        """
        Parse a community's SSR JSON detail page: complete and emit the
        CommunityItem, then emit one request per on-sale second-hand case.

        :param response: SSR JSON response of the community detail page
        :return: yields the completed CommunityItem and case-detail requests
        """
        item_cmty = copy(response.meta['item_cmty'])
        try:
            resp = json.loads(response.body.decode())
            cmty_detail_dict = resp['xqDetail']['detail']
            assert cmty_detail_dict, '{}-{}-{} 小区详情获取为空，url:{}'.format(item_cmty['cityName'], item_cmty['districtName'],
                                                                       item_cmty['name'], response.request.url)
            cmty_esf_dict = resp['xqDetail']['xqEsf']
            assert cmty_esf_dict, '{}-{}-{} 二手房案例信息获取为空，url:{}'.format(item_cmty['cityName'],
                                                                       item_cmty['districtName'],
                                                                       item_cmty['name'], response.request.url)
        except AssertionError as e:
            logger.error(e)
        except Exception:
            logger.error(
                '{}-{}-{} 小区详情/二手房案例列表获取失败，url:{}'.format(item_cmty['cityName'], item_cmty['districtName'],
                                                          item_cmty['name'],
                                                          response.request.url))
        else:
            # Fill in the community detail fields.
            item_cmty['guid'] = cmty_detail_dict.get('id', None)  # community id
            item_cmty['projectBrief'] = cmty_detail_dict.get('intro', None)  # project introduction
            item_cmty['type'] = cmty_detail_dict.get('propertyType', None)  # property type
            item_cmty['buildYear'] = cmty_detail_dict.get('buildingYear', None)  # year built
            item_cmty['grossBuildArea'] = cmty_detail_dict.get('buildingArea', None)  # gross building area
            item_cmty['propertyYears'] = cmty_detail_dict.get('propertyRights', None)  # property-right years
            item_cmty['floorAreaRatio'] = cmty_detail_dict.get('plotRatio', None)  # plot ratio
            item_cmty['numberOfParkingSpaces'] = cmty_detail_dict.get('parkingSpace', None)  # parking spaces
            item_cmty['totalRoomCount'] = cmty_detail_dict.get('householdsTotal', None)  # total households
            item_cmty['developerName'] = cmty_detail_dict.get('developerName', None)  # developer
            item_cmty['updateDate'] = cmty_detail_dict.get('updateTime', None)  # update time
            item_cmty['totalNumberOfBuildings'] = cmty_detail_dict.get('buildingCount', None)  # number of buildings
            item_cmty['propertyFee'] = cmty_detail_dict.get('propertyFee', None)  # property-management fee
            item_cmty['priceTrend'] = self.deal_price_trend(cmty_detail_dict.get('cellPriceTrends', list()))  # price trend
            item_cmty['transportFacility'] = cmty_detail_dict.get('metros', list())  # nearby metro/transport
            yield item_cmty

            # Extract the on-sale second-hand case list.
            total_num = cmty_esf_dict.get('total', 0)
            cmty_esf_li = cmty_esf_dict.get('list', list())
            if total_num and cmty_esf_li:
                for cmty_esf in cmty_esf_li:
                    item_case = CaseItem()
                    # Copy over every community field that CaseItem also declares.
                    for key, value in item_cmty.items():
                        try:
                            item_case[key] = value
                        except:
                            # CaseItem does not declare this field; skip it.
                            continue
                    item_case['roomType'] = cmty_esf.get('name', None)
                    item_case['bedroom'] = cmty_esf.get('shi', None)
                    item_case['livingroom'] = cmty_esf.get('ting', None)
                    item_case['bathroom'] = cmty_esf.get('wei', None)
                    item_case['buildArea'] = cmty_esf.get('area', None)
                    item_case['price'] = cmty_esf.get('price', None)
                    item_case['totalPrice'] = cmty_esf.get('minPrice', None)
                    item_case['directionType'] = cmty_esf.get('direction', None)
                    item_case['floorLevels'] = cmty_esf.get('onFloor', None)
                    item_case['floor'] = self.get_case_floor(item_case['floorLevels'])
                    item_case['totalFloor'] = cmty_esf.get('totalFloor', None)
                    item_case['caseId'] = cmty_esf.get('id', None)
                    item_case['dataUrl'] = self.deal_case_url(response.request.url, item_case['caseId'])
                    if item_case['dataUrl']:
                        # Build the request for the case detail page.
                        headers = {
                            'referer': item_case['dataUrl'],
                        }
                        yield scrapy.Request(
                            url=urllib.parse.urljoin(item_case['dataUrl'], '?SSR_JSON=true'),
                            headers=headers,
                            callback=self.parse_case_detail,
                            meta=dict(item_case=deepcopy(item_case), ),
                            priority=5,
                        )
                    else:
                        logger.error('{}-{}-{}-{}-{} 案例url构造失败'.format(item_cmty['cityName'], item_cmty['districtName'],
                                                                       item_cmty['name'], item_case['roomType'],
                                                                       item_case['caseId']))
            else:
                logger.warning('{}-{}-{} 小区在售二手房获取为空，url:{}'.format(item_cmty['cityName'], item_cmty['districtName'],
                                                                    item_cmty['name'], item_cmty['communityUrl']))
            del cmty_esf_li
            del cmty_detail_dict
            del cmty_esf_dict
            del resp
            del response

    def parse_case_detail(self, response):
        """
        Parse a second-hand case's SSR JSON detail page and emit the completed CaseItem.

        :param response: SSR JSON response of the case detail page
        :return: yields one CaseItem
        """
        item_case = copy(response.meta['item_case'])
        try:
            resp = json.loads(response.body.decode())
            esf_detail_dict = resp.get('esfDetail', dict())
            detail_dict = esf_detail_dict.get('detail', dict())
            assert esf_detail_dict and detail_dict, '{}-{}-{} 案例详情信息提取为空，url:{}'.format(item_case['cityName'],
                                                                                        item_case['districtName'],
                                                                                        item_case['name'],
                                                                                        response.request.url)
            agent_allcocate_dict = esf_detail_dict.get('agentAllcocate', dict())
            esf_html_title_dict = resp.get('html', dict())
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error('{}-{}-{} 案例详情信息获取出错，url:{}，error:{}'.format(item_case['cityName'], item_case['districtName'],
                                                                      item_case['name'], response.request.url, e))
        else:
            # Fill in the case detail fields.
            item_case['title'] = esf_html_title_dict.get('title', None)  # page title
            tags_li = detail_dict.get('tags', list())
            item_case['isTwoYear'] = self.deal_case_choice_item(tags_li, 1)  # held over two years
            item_case['isFiveYear'] = self.deal_case_choice_item(tags_li, 2)  # held over five years
            item_case['isElevator'] = self.deal_case_choice_item(tags_li, 3)  # has elevator
            item_case['mortgage'] = self.deal_case_choice_item(tags_li, 4)  # has mortgage
            item_case['propertyYears'] = detail_dict.get('propertyYear', None)  # property-right years (case-level overwrite)
            item_case['calQuality'] = detail_dict.get('propertyType', None)  # residence category
            item_case['updateDate'] = detail_dict.get('updateTime', None)  # update time (case-level overwrite)
            item_case['createDate'] = detail_dict.get('createTime', None)  # creation time
            item_case['broker'] = agent_allcocate_dict.get('name', None)  # agent name
            item_case['tel'] = agent_allcocate_dict.get('mobile', None)  # agent phone
            yield item_case
            del detail_dict
            del esf_detail_dict
            del resp
            del response

    def process_links_city_li(self, links):
        """
        Rewrite every extracted city link so it points at that city's community
        (xiaoqu) list page, keeping only cities selected by the crawl/filter
        configuration.

        :param links: links extracted by the city-list LinkExtractor
        :return: filtered list of links; None (after closing the spider) when no
                 city link was extracted at all
        """
        cities_li, mode = self.get_crawl_or_filter_cities()
        url = '/xiaoqu/?SSR_JSON=true'
        if not len(links):
            logger.error('二手房城市url列表匹配为空')
            self.crawler.engine.close_spider(self, '二手房城市url列表匹配为空')
            return
        new_links = list()
        for link in links:
            # mode == 1: cities_li is a whitelist; mode == 2: cities_li is a blacklist.
            keep = (mode == 1 and cities_li and link.text in cities_li) or \
                   (mode == 2 and cities_li and link.text not in cities_li)
            if keep:
                try:
                    link.url = self.url_join(link.url, url=url)
                    assert link.url
                except Exception:
                    # BUG FIX: the original message had no '{}' placeholders, so the two
                    # str.format arguments were silently dropped from the log output.
                    logger.error('【{}】【{}】二手房小区url拼接出错'.format(link.url, url))
                new_links.append(link)
        return new_links

    def process_request_city_li(self, request, spider):
        """
        Decorate each city request before scheduling: set a clean Referer (the
        page url without the SSR flag) and log a framed start banner.

        :param request: city page request produced by the crawl rule
        :param spider: the spider issuing the request (unused)
        :return: the (mutated) request
        """
        border = '*' * 40
        title = '开始【{}】二手房爬取'.format(request.meta['link_text'])
        # Banner interior is 38 columns; each CJK char is counted double for centering.
        padding = 38 - len(title) * 2
        left_pad = padding // 2
        request.headers['Referer'] = request.url.replace('?SSR_JSON=true', '')
        banner = ('\r\n' + border + '\r\n' + '*' + ' ' * left_pad + title
                  + ' ' * (padding - left_pad) + '*' + '\r\n' + border)
        logger.info(banner)
        return request

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        Resolve the configured city list and whether it is a crawl or filter list.

        Precedence: -a crawl, -a filter, settings CRAWL_CITIES, settings
        FILTER_CITIES; with nothing configured a nationwide sentinel is returned.
        :return: (city name list, mode) where mode 1 = crawl list, 2 = filter list
        """
        candidates = (
            (getattr(self, 'CRAWL_CITIES', None), 1),   # -a crawl=...
            (getattr(self, 'FILTER_CITIES', None), 2),  # -a filter=...
            (self.settings.get('CRAWL_CITIES', None), 1),
            (self.settings.get('FILTER_CITIES', None), 2),
        )
        for cities, mode in candidates:
            if cities:
                return self.check_params(cities), mode
        return ['全国'], 1

    def check_params(self, params) -> list:
        """
        Validate and normalise the configured city names.

        Accepts either a separator-delimited string (，、；; or ,) or a list of
        strings; every entry must be pure CJK after stripping. On any invalid
        input the spider is closed and None is returned.

        :param params: raw crawl/filter configuration value
        :return: e.g. ['重庆', '成都', '武汉'], or None on error
        """
        try:
            if isinstance(params, str):
                # Normalise all accepted separators to ',' before splitting.
                parts = re.compile('，|、|；|;').sub(',', params).split(',')
                ret_li = self._validate_city_names(parts)
                if ret_li is None:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl fangduoduo -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                ret_li = self._validate_city_names(params)
                if ret_li is None:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    @staticmethod
    def _validate_city_names(parts) -> list or None:
        """Strip each entry and keep it only if it is pure CJK; None unless ALL entries pass."""
        regex = re.compile(r'^[\u4E00-\u9FFF]+$')
        cleaned = [part.strip() for part in parts if
                   isinstance(part, str) and part.strip() and regex.findall(part.strip())]
        # Every input entry must survive validation, and there must be at least one.
        if cleaned and len(cleaned) == len(parts):
            return cleaned
        return None

    @staticmethod
    def url_join(base: str, url: str) -> str:
        """
        url拼接
        :param base: 基本的url
        :param url: 拼接的url
        :return:
        """
        return urllib.parse.urljoin(base, url)

    @staticmethod
    def create_cmty_li_params(city_id, page, url):
        """
        构造小区列表请求参数
        :param city_id: 城市id
        :param page: 页数num
        :param url: 行政区/县url字符串
        :return:
        """
        regex = re.compile(r'-(.*)/$')
        condition_temp = 'p{}_i20_{}'
        try:
            ret = regex.findall(url)[0]
            assert ret
        except:
            return
        else:
            params_dict = dict(
                cityId=city_id,
                condition=condition_temp.format(page, ret)
            )
            return params_dict

    @staticmethod
    def deal_lng_and_lat(lng, lat) -> list or None:
        """
        处理经纬度
        :param lng: 经度
        :param lat: 纬度
        :return:
        """
        try:
            assert lng and lat
            ret = [lng, lat]
        except:
            return
        else:
            return ret

    @staticmethod
    def deal_community_url(url):
        """
        处理小区详情页url
        :param url:
        :return:
        """
        # 小区详情页url模板
        community_url_temp = 'https://{}.fangdd.com/{}'
        try:
            if not (url.startswith('http') and url.endswith('html')):
                ret_li = url.split('/', maxsplit=2)[1:]
                assert len(ret_li) == 2
                community_url = community_url_temp.format(ret_li[0], ret_li[1])
            else:
                return url
        except:
            return
        else:
            return community_url

    @staticmethod
    def deal_price_trend(trend_li: list) -> dict or None:
        """
        处理价格趋势
        :param trend_li:
        :return:
        """
        price_trend_dict = OrderedDict()
        try:
            assert len(trend_li)
            for trend_dict in trend_li:
                price_dict_key = trend_dict.get('name', None)
                price_dict_value = trend_dict.get('price', None)
                price_trend_dict[price_dict_key] = price_dict_value
        except:
            return
        else:
            return price_trend_dict

    @staticmethod
    def get_case_floor(floor_num: int):
        """
        获取案例所在楼层区（ >= 36"超高层";>= 19 "高层";>= 8"小高层";>= 5"多层";>= 1"低层"; ）
        :param floor_num:
        :return:
        """
        try:
            floor_num = int(floor_num)
            if floor_num >= 36:
                floor_str = '超高层'
            elif floor_num >= 19:
                floor_str = '高层'
            elif floor_num >= 8:
                floor_str = '小高层'
            elif floor_num >= 5:
                floor_str = '多层'
            elif floor_num >= 1:
                floor_str = '低层'
            else:
                floor_str = None
        except:
            return
        else:
            return floor_str

    @staticmethod
    def deal_case_choice_item(tags_li: list, choice: int) -> bool:
        """
        处理案例部分字段（满两年、满五年、电梯、贷款）
        :param tags_li:
        :param choice:  1: '是否满两年';2: '是否满五年';3: '是否有电梯';4: '是否有贷款';
        :return:
        """
        try:
            assert tags_li
            tags_str = ','.join(tags_li)
            if choice == 1:
                ret = True if '满两' in tags_str else False
            elif choice == 2:
                ret = True if '满五' in tags_str else False
            elif choice == 3:
                ret = True if ('电梯房' or '有电梯') in tags_str else False
            elif choice == 4:
                ret = False if '无贷款' in tags_str else True
            else:
                ret = False
        except:
            return False
        else:
            return ret

    @staticmethod
    def deal_case_url(base_url, case_id):
        """
        构造案例详情页url
        :param base_url: 参考url，用于案例url构造
        :param case_id: 案例id
        :return:
        """
        # 案例详情页url模板
        case_url_temp = '{}/esf/n-{}.html'
        try:
            base_str = base_url.split('/xiaoqu')[0]
            assert base_str and case_id
            case_url = case_url_temp.format(base_str, case_id)
        except:
            return
        else:
            return case_url

    @staticmethod
    def get_province_id_and_city_id(city_name, data_frame) -> tuple:
        """
        获取省id、城市id
        :param city_name: 城市名
        :param data_frame: DataFrame
        :return:
        """
        try:
            # 获取省id、城市id
            city_df = data_frame.loc[(data_frame.type == '城市') & (data_frame.name.str.contains(city_name))]
            city_li = city_df.to_dict(orient='records')
            assert len(city_li) == 1
            city_dict = city_li[0]
            province_id = city_dict.get('parentId', None)
            city_id = city_dict.get('id', None)
            assert province_id and city_id
            # 获取省name
            province_df = data_frame.loc[(data_frame.type == '省份') & (data_frame.id == province_id)]
            province_li = province_df.to_dict(orient='records')
            assert len(province_li) == 1
            province_dict = province_li[0]
            province_name = province_dict.get('name', None)
            assert province_name
        except:
            logger.error('【{}】 获取省name、省id和城市id出错'.format(city_name))
            return None, None, None
        else:
            return province_name, province_id, city_id

    def get_district_id(self, city_name, city_id, district_name, data_frame):
        """
        Look up a district's id in the reference table.

        :param city_name: city name (display only, plus the Dongguan special case)
        :param city_id: database city id (NOT the site's city id)
        :param district_name: district display string from the site
        :param data_frame: province/city/district reference DataFrame
        :return: the district id, or None when no single match is found
        """
        try:
            if not (city_id and district_name):
                raise ValueError('missing city_id/district_name')
            parent_mask = (data_frame.type == '行政区') & (data_frame.parentId == city_id)
            if '东莞' in city_name:
                # Special case: for Dongguan, match on the parent city alone.
                district_df = data_frame.loc[parent_mask]
            else:
                district_name = self.deal_district_str(city_name, district_name)
                name_mask = (data_frame.name.str.contains(district_name)) | (
                    data_frame.otherName.str.contains(district_name))
                district_df = data_frame.loc[parent_mask & name_mask]
            records = district_df.to_dict(orient='records')
            if len(records) != 1:
                raise ValueError('district match is not unique')
            district_id = records[0].get('id', None)
            if not district_id:
                raise ValueError('district id missing')
        except Exception:
            logger.error('【{}-{}】 获取区/县id出错'.format(city_name, district_name))
            return None
        return district_id

    @staticmethod
    def deal_district_str(city_name, district_name):
        """
        处理行政区/县str
        :param city_name: 城市名
        :param district_name: 行政区/县str
        :return:
        """
        try:
            district_name = district_name.replace('小区', '').split('(')[0]
        except:
            logger.error('【{}-{}】 区/县str解析出错'.format(city_name, district_name))
        else:
            return district_name
