import scrapy

import logging
import re
import pandas as pd
from copy import copy, deepcopy
import urllib.parse
from HifoEsf.items import CommunityItem, CaseItem
import json

logger = logging.getLogger(__name__)


class FangtianxiaSpider(scrapy.Spider):
    """
    Spider for fang.com second-hand housing (二手房): walks city -> district ->
    community -> listing-case pages and yields CommunityItem / CaseItem.
    """
    name = 'fangtianxia'
    allowed_domains = ['fang.com']
    # National city index page; every crawl starts here.
    start_urls = ['https://cq.esf.fang.com/newsecond/esfcities.aspx']
    custom_settings = {
        'CONCURRENT_REQUESTS': 5,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,  # proxies held in redis; RedisConnPipeline must be enabled too
            'HifoEsf.middlewares.FangtianxiaCookiesMiddleware': 546,
        },
        'ITEM_PIPELINES': {
            'HifoEsf.pipelines.RedisConnPipeline': 299,  # redis connection; required when a XXXProxyMiddleware is enabled
            'HifoEsf.pipelines.FangtianxiaPipeline': 301,
            'HifoEsf.pipelines.MongoClientPipeline': 399,
        },
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 404, 408, 407, 302],
        'RETRY_TIMES': 20,
        'COOKIES_ENABLED': True,  # send cookies with requests
        'CASE_ID_FILTER': True,  # whether to de-duplicate listing cases by case id
        'COOKIES_REDIS_NAME': 'ftx_cookies',
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        :param crawl: cities to crawl (``-a crawl=重庆，北京``), overrides settings
        :param filter: cities to exclude (``-a filter=...``), overrides settings
        """
        # Command-line city lists; consumed by get_crawl_or_filter_cities().
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """
        Parse the national city index: validate configuration, build the
        city list (Chongqing is skipped -- its host is already the start
        url), then fan out per-city second-hand-house requests.

        :param response: the city index page (start_urls[0])
        """
        try:
            # Dedup readiness check; filter_flag is presumably attached by a
            # pipeline/middleware during spider start-up -- TODO confirm.
            assert self.filter_flag != False, f'>>>>{self.name}:案例id去重配置失败<<<<'
            # Resolve the configured crawl/filter city parameters.
            cities_li, type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # Dedup check failed: shut the spider down.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Parameter validation failed: shut the spider down.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # Validation passed.
            # Extract the province groups (the last <li> is excluded).
            province_obj_li = response.xpath("//div[@id='c02']/ul/li[position()<last()]")
            ftx_city_li = list()
            for province_obj in province_obj_li:
                province_name = province_obj.xpath("./strong/text()").extract_first()
                if province_name == '重庆':
                    continue
                # Extract the cities under this province.
                city_obj_li = province_obj.xpath("./a")
                for city_obj in city_obj_li:
                    city_dict = dict()
                    city_dict['city_name'] = city_obj.xpath("./text()").extract_first()
                    city_dict['city_url'] = city_obj.xpath("./@href").extract_first()
                    # Municipalities ('直辖市') double as their own province.
                    city_dict['province_name'] = province_name if province_name != '直辖市' else city_dict['city_name']
                    ftx_city_li.append(city_dict)
            # Pre-filter the city list.
            if cities_li:
                cities_df = pd.DataFrame(ftx_city_li)
                # type 1: crawl (inclusion) list
                if type == 1:
                    # '全国' (whole country) means crawl every city.
                    if '全国' in cities_li:
                        crawl_city_li = cities_df.to_dict(orient='records')
                    else:
                        crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                # type 2: filter (exclusion) list
                else:
                    # Excluding '全国' would leave nothing to crawl.
                    if '全国' in cities_li:
                        crawl_city_li = list()
                        self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                    # Normal exclusion.
                    else:
                        crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')

                # Build the per-city second-hand-house requests.
                for esf_request in self.create_city_request(crawl_city_li):
                    # TODO: add request filtering conditions
                    yield esf_request
                del cities_df
            else:
                logger.error('房天下二手房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '房天下二手房城市url列表匹配为空')
        pass

    def create_city_request(self, crawl_city_li):
        """
        Build the second-hand-house entry requests for every crawlable city.

        For each city one request per property type ('住宅' residential,
        '别墅' villa) is yielded, both targeting the district-list page.

        :param crawl_city_li: list of city dicts holding url-building info
        :return: generator of scrapy.Request
        """
        # Explicit emptiness check instead of the old assert + bare-except:
        # assert is stripped under -O and the bare except hid real errors.
        if not crawl_city_li:
            self.crawler.engine.close_spider(self, '房天下二手房城市爬取列表为空')
            return
        for crawl_city_dict in crawl_city_li:
            # Normalize the city's esf url; None means the entry is unusable.
            crawl_city_dict = self.format_crawl_city_dict(crawl_city_dict)
            if not crawl_city_dict:
                continue
            esf_url = crawl_city_dict['esf_url']
            headers = {
                'referer': crawl_city_dict['city_url']
            }
            # Start on the first district-list page.
            page_num = 1
            # Type code 1 = 住宅 (residential), 2 = 别墅 (villa); the rest of
            # the path parameter is the site's fixed filter/page encoding.
            for type_code, type_name in ((1, '住宅'), (2, '别墅')):
                page_param = f'__{type_code}_3_0_0_{page_num}_0_0_0'
                yield scrapy.Request(
                    esf_url.format(page_param),
                    headers=headers,
                    callback=self.parse_district_list,
                    meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), type=type_name, page_num=page_num),
                    dont_filter=True,
                )

    def parse_district_list(self, response):
        """
        Parse the housing page of one city/property-type and fan out one
        request per administrative district.

        :param response: /housing/ page filtered by property type
        """
        crawl_city_dict = copy(response.meta['crawl_city_dict'])
        type = copy(response.meta['type'])
        page_num = copy(response.meta['page_num'])
        # District links; position()>1 skips the first (all-districts) link.
        a_obj_li = response.xpath(
            "//div[@id='houselist_B03_02']/div[@class='qxName']/a[position()>1]")
        if len(a_obj_li):
            # Total community count; 'or 0' guards against a missing node so a
            # layout change degrades to the warning below, not a TypeError.
            total_num = int(response.xpath("//b[@class='findplotNum']/text()").extract_first() or 0)
            if total_num:
                # Always split by district (even below the 2000-result cap) and
                # drop surrounding-area ('周边') / real-estate ('地产') pseudo-districts.
                for a_obj in a_obj_li:
                    district_name = a_obj.xpath("./text()").extract_first()
                    if any([not district_name, '周边' in district_name, '地产' in district_name]):
                        continue
                    xiaoqu_url = a_obj.xpath("./@href").extract_first()
                    # Request the first page of the district's community list.
                    yield response.follow(
                        xiaoqu_url,
                        callback=self.parse_project_list,
                        meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), type=deepcopy(type),
                                  district_name=deepcopy(district_name), page_num=deepcopy(page_num), ),
                        dont_filter=True,
                    )
            else:
                # Use the module logger (the original called the root logger here).
                logger.warning(
                    '{}-{}-{} 小区总数提取为0'.format(crawl_city_dict['province_name'], crawl_city_dict['city_name'], type))
        else:
            logger.error(
                '{}-{}-{} 行政区列表获取为空'.format(crawl_city_dict['province_name'], crawl_city_dict['city_name'], type))

    def parse_project_list(self, response):
        """
        Parse one page of a district's community list, yield one request per
        community homepage, then follow pagination.

        :param response: community list page
        """
        crawl_city_dict = copy(response.meta['crawl_city_dict'])
        type = copy(response.meta['type'])
        district_name = copy(response.meta['district_name'])
        page_num = copy(response.meta['page_num'])
        # Community rows on this page.
        div_obj_li = response.xpath("//div[@class='houseList']/div[contains(@class,'list')]")
        if len(div_obj_li):
            for div_obj in div_obj_li:
                item_cmty = CommunityItem()
                item_cmty['provinceName'] = crawl_city_dict['province_name']  # province
                item_cmty['cityName'] = crawl_city_dict['city_name']  # city
                item_cmty['districtName'] = district_name  # administrative district
                item_cmty['name'] = div_obj.xpath("./dl//a[@class='plotTit']/text()").extract_first()  # community name
                item_cmty['communityUrl'] = div_obj.xpath("./dl//a[@class='plotTit']/@href").extract_first()  # community url
                item_cmty['type'] = type  # property type
                item_cmty['shangQuan'] = div_obj.xpath("./dl/dd/p[2]/a[2]/text()").extract_first()  # business district
                item_cmty['other'] = div_obj.xpath("./dl/dd/ul/li/a/text()").extract()  # misc info
                # Request the community homepage for more fields.
                if item_cmty['communityUrl']:
                    yield response.follow(
                        item_cmty['communityUrl'],
                        callback=self.parse_project,
                        meta=dict(item_cmty=deepcopy(item_cmty), ),
                        dont_filter=True,
                    )
                else:
                    logger.error('{}-{}-{}-{}-{} 小区连接提取为空'.format(item_cmty['provinceName'], item_cmty['cityName'],
                                                                  item_cmty['districtName'], type, item_cmty['name']))
        else:
            # Use the module logger for consistency (was the root logger).
            logger.warning(
                '{}-{}-{}-{}-第{}页 小区列表为空'.format(crawl_city_dict['province_name'], crawl_city_dict['city_name'],
                                                 district_name, type, page_num))

        # Pagination.
        next_page_url = response.xpath("//a[contains(text(),'下一页')]/@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield response.follow(
                next_page_url,
                callback=self.parse_project_list,
                meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), type=deepcopy(type),
                          district_name=deepcopy(district_name), page_num=deepcopy(next_page_num), ),
                dont_filter=True,
            )

    def parse_project(self, response):
        """
        Parse the community homepage: capture summary fields, then request
        both the community detail page and the listing-case list.

        :param response: community homepage
        """
        item_cmty = copy(response.meta['item_cmty'])
        item_cmty['communityUrl'] = response.request.url  # final community url (after any redirect)
        # NOTE(review): assumes the url shape '...loupan/<id>.<suffix>' -- an
        # IndexError here would propagate to scrapy's error handling; confirm.
        item_cmty['guid'] = item_cmty['communityUrl'].split("loupan/")[1].split(".")[0]  # community id
        item_cmty['price'] = response.xpath("//p[@class='num_price']/b/text()").extract_first()  # unit price
        item_cmty['totalNumberOfBuildings'] = response.xpath(
            "//span[contains(text(),'楼栋总数')]/following-sibling::p/text()").extract_first()  # total buildings
        item_cmty['totalRoomCount'] = response.xpath(
            "//span[contains(text(),'房屋总数')]/following-sibling::p/text()").extract_first()  # total households
        item_cmty['buildType'] = response.xpath(
            "//span[contains(text(),'建筑类型')]/following-sibling::p/text()").extract_first()  # building type
        item_cmty['buildYear'] = response.xpath(
            "//span[contains(text(),'建筑年代')]/following-sibling::p/text()").extract_first()  # build year
        item_cmty['address'] = response.xpath(
            "//span[contains(text(),'小区位置')]/following-sibling::p/span/text()").extract_first()  # community address
        item_cmty['propertyCompany'] = response.xpath(
            "//span[contains(text(),'物业公司')]/following-sibling::p/a/text()").extract_first()  # property management company
        item_cmty['developerName'] = response.xpath(
            "//span[contains(text(),'开发商')]/following-sibling::p/a/text()").extract_first()  # developer
        item_cmty['monthOnMonth'] = response.xpath("//p[@class='text_chain']/span/text()").extract_first()  # month-on-month change
        # Request the community detail page.
        project_detail_url = response.xpath("//a[contains(text(),'小区详情')]/@href").extract_first()
        project_detail_url = urllib.parse.urljoin(item_cmty['communityUrl'], project_detail_url)
        yield scrapy.Request(
            project_detail_url,
            callback=self.parse_project_detail,
            meta=dict(item_cmty=deepcopy(item_cmty), ),
            dont_filter=True,
        )

        # Request the second-hand listing-case list; 'h316/' is presumably a
        # fixed site filter suffix -- TODO confirm.
        case_list_url = response.xpath("//span[text()='二手房源']/following-sibling::p/a/@href").extract_first()
        case_list_url = urllib.parse.urljoin(item_cmty['communityUrl'], case_list_url) + 'h316/'
        yield scrapy.Request(
            case_list_url,
            callback=self.parse_case_list,
            meta=dict(item_cmty=deepcopy(item_cmty), page_num=1),
            dont_filter=True,
        )

    def parse_project_detail(self, response):
        """
        Parse the community detail page, enrich the community item, then
        request the map page for the community's coordinates.

        :param response: community detail page
        """
        item_cmty = copy(response.meta['item_cmty'])
        # Each field lives in a <span>label</span>/<p>value</p> pair except
        # the property fee, which sits under an extra wrapping <div>.
        field_xpaths = (
            ('propertyYears', "//span[contains(text(),'产权描述')]/following-sibling::p/text()"),  # property tenure
            ('grossBuildArea', "//span[contains(text(),'建筑面积')]/following-sibling::p/text()"),  # gross floor area
            ('coverageArea', "//span[contains(text(),'占地面积')]/following-sibling::p/text()"),  # land area
            ('greeningRate', "//span[contains(text(),'绿 化 率')]/following-sibling::p/text()"),  # greening rate
            ('floorAreaRatio', "//span[contains(text(),'容 积 率')]/following-sibling::p/text()"),  # plot ratio
            ('propertyFee', "//span[contains(text(),'附加信息')]/following-sibling::div/p/text()"),  # property fee
            ('numberOfParkingSpaces', "//span[contains(text(),'停 车 位')]/following-sibling::p/text()"),  # parking spaces
        )
        for field_name, xpath_expr in field_xpaths:
            item_cmty[field_name] = response.xpath(xpath_expr).extract_first()

        # Follow up with the map endpoint to resolve the coordinates.
        map_url = "https://ditu.fang.com/?c=channel&a=xiaoquNew&newcode={}"
        yield scrapy.Request(
            map_url.format(item_cmty['guid']),
            callback=self.parse_location,
            meta=dict(item_cmty=deepcopy(item_cmty), ),
            dont_filter=True,
        )

    def parse_location(self, response):
        """
        Parse the map page for the community's coordinates and emit the
        completed CommunityItem.

        :param response: ditu.fang.com map page for this community
        """
        finished_item = copy(response.meta['item_cmty'])
        page_text = response.body.decode()
        finished_item['location'] = self.get_project_location(page_text)  # (mapx, mapy) or None
        yield finished_item

    def parse_case_list(self, response):
        """
        Parse one page of a community's second-hand listing cases, yield a
        detail request per case (optionally de-duplicated by case id via
        redis), then follow pagination.

        :param response: listing-case list page
        """
        item_cmty = copy(response.meta['item_cmty'])
        page_num = copy(response.meta['page_num'])
        # Case rows on the current page.
        case_obj_list = response.xpath("//div[@class='shop_list shop_list_4']/dl")
        if len(case_obj_list):
            for case_obj in case_obj_list:
                item_case = CaseItem()
                item_case['provinceName'] = item_cmty['provinceName']  # province
                item_case['cityName'] = item_cmty['cityName']  # city
                item_case['districtName'] = item_cmty['districtName']  # administrative district
                item_case['shangQuan'] = item_cmty['shangQuan']  # business district
                item_case['name'] = item_cmty['name']  # community name
                item_case['guid'] = item_cmty['guid']  # community id
                item_case['caseType'] = '挂牌'  # case type: listed for sale
                item_case['address'] = case_obj.xpath(".//p[@class='add_shop']/span/text()").extract_first()  # address
                item_case['title'] = case_obj.xpath(".//h4/a/@title").extract_first()  # title
                item_case['dataUrl'] = case_obj.xpath(".//h4/a/@href").extract_first()  # case url
                item_case['caseId'] = self.get_case_id(case_obj.xpath("./@data-bg").extract_first())  # case id
                if not item_case['caseId']:
                    item_case['caseId'] = self.get_case_id(case_obj.xpath("./@data-qyk").extract_first())  # fallback attribute
                # BUGFIX: this used response.xpath with an absolute path, so every
                # case on the page got the FIRST row's floor; scope it to the row.
                item_case['floor'] = case_obj.xpath(".//p[@class='tel_shop']/a/text()").extract_first()  # floor band
                item_case['totalPrice'] = case_obj.xpath(
                    ".//dd[@class='price_right']/span[@class]/b/text()").extract_first()  # total price
                item_case['price'] = case_obj.xpath(".//dd[@class='price_right']/span[last()]/text()").extract_first()  # unit price
                item_case['tag'] = case_obj.xpath(".//dd/p[3]/a/text()").extract()  # tags
                item_case['isFiveYear'], item_case['isTwoYear'] = self.deal_two_or_five(item_case['tag'])  # >5y / >2y flags
                case_info_dict = self.deal_case_info(case_obj.xpath(".//p[@class='tel_shop']/text()").extract())
                item_case['roomType'] = case_info_dict.get('roomType', None)  # room layout
                item_case['buildArea'] = case_info_dict.get('buildArea', None)  # floor area
                item_case['totalFloor'] = case_info_dict.get('totalFloor', None)  # total floors
                item_case['directionType'] = case_info_dict.get('directionType', None)  # orientation
                item_case['buildYear'] = case_info_dict.get('buildYear', None)  # build year
                # Request the case detail page.
                if item_case['dataUrl']:
                    # De-duplicate by case id: sadd returns 1 only for unseen ids.
                    # redis_conn is presumably attached by RedisConnPipeline -- TODO confirm.
                    case_filter_on = self.settings['CASE_ID_FILTER']
                    if not case_filter_on or self.redis_conn.sadd('fangtianxia_esf_case_id', item_case['caseId']):
                        yield response.follow(
                            item_case['dataUrl'],
                            callback=self.parse_case_detail,
                            meta=dict(item_case=deepcopy(item_case), ),
                            dont_filter=False,
                            priority=30,
                        )
                else:
                    logger.warning("{}-{}-{}-{} 案例url提取为空".format(item_case['provinceName'], item_case['cityName'],
                                                                  item_case['districtName'], item_case['title']))
        else:
            logger.warning("{}-{}-{}-{}-第{}页 案例列表获取为空".format(item_cmty['provinceName'], item_cmty['cityName'],
                                                              item_cmty['districtName'], item_cmty['name'], page_num))

        # Pagination.
        next_page_url = response.xpath("//a[contains(text(),'下一页')]/@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield response.follow(
                next_page_url,
                callback=self.parse_case_list,
                meta=dict(item_cmty=deepcopy(item_cmty), page_num=deepcopy(next_page_num), ),
                dont_filter=True,
            )

    def parse_case_detail(self, response):
        """
        Parse a listing-case detail page, back-filling fields the list page
        left empty, and yield the completed CaseItem.

        :param response: case detail page
        """
        item_case = copy(response.meta['item_case'])
        item_case['dataUrl'] = response.request.url  # final case url (after any redirect)
        if not item_case['roomType']:
            item_case['roomType'] = response.xpath(
                "//div[contains(text(),'户型')]/preceding-sibling::div[1]/text()").extract_first()  # room layout
        if not item_case['buildArea']:
            item_case['buildArea'] = response.xpath(
                "//div[contains(text(),'建筑面积')]/preceding-sibling::div[1]/text()").extract_first()  # floor area
        if not item_case['directionType']:
            item_case['directionType'] = response.xpath(
                "//div[contains(text(),'朝向')]/preceding-sibling::div[1]/text()").extract_first()  # orientation
        item_case['decorationType'] = response.xpath(
            "//div[contains(text(),'装修')]/preceding-sibling::div[1]/a/text()").extract_first()  # decoration
        if not item_case['totalFloor']:
            item_case['totalFloor'] = response.xpath("//div[contains(text(),'楼层')]/text()").extract_first()  # total floors
        if not item_case['floor']:
            item_case['floor'] = response.xpath(
                "//div[contains(text(),'楼层')]/preceding-sibling::div[1]/a/text()").extract_first()  # floor band
        if not item_case['buildYear']:
            item_case['buildYear'] = response.xpath(
                "//div[@class='content-item fydes-item']//span[contains(text(),'建筑年代')]/following-sibling::span[1]/text()").extract_first()  # build year
        item_case['isElevator'] = response.xpath(
            "//span[contains(text(),'有无电梯')]/following-sibling::span[1]/text()").extract_first()  # elevator present
        item_case['property'] = response.xpath(
            "//span[contains(text(),'产权性质')]/following-sibling::span[1]//text()").extract()  # ownership nature
        item_case['calQuality'] = response.xpath(
            "//span[contains(text(),'住宅类别')]/following-sibling::span[1]//text()").extract()  # residence category
        item_case['buildStructure'] = response.xpath(
            "//span[contains(text(),'建筑结构')]/following-sibling::span[1]//text()").extract()  # building structure
        item_case['buildType'] = response.xpath(
            "//span[contains(text(),'建筑类别')]/following-sibling::span[1]//text()").extract()  # building type
        item_case['listingDate'] = response.xpath(
            "//span[contains(text(),'挂牌时间')]/following-sibling::span[1]/text()").extract_first()  # listing date
        item_case['other'] = response.xpath("//ul[@class='fyms_modify']/li//text()").extract()  # misc info
        yield item_case

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        获取配置参数
        :return: (['重庆', '成都', '武汉'], 1)
        """
        param_1 = getattr(self, 'CRAWL_CITIES', None)
        param_2 = getattr(self, 'FILTER_CITIES', None)
        param_3 = self.settings.get('CRAWL_CITIES', None)
        param_4 = self.settings.get('FILTER_CITIES', None)
        if param_1 or param_2:
            if param_1:
                return self.check_params(param_1), 1  # 1代表配置参数是爬取列表
            else:
                return self.check_params(param_2), 2  # 2代表配置参数是过滤列表
        elif param_3 or param_4:
            if param_3:
                return self.check_params(param_3), 1
            else:
                return self.check_params(param_4), 2
        else:
            return ['全国'], 1

    @staticmethod
    def format_crawl_city_dict(crawl_city_dict):
        """
        城市二手房url标准化处理
        :param crawl_city_dict:
        :return:
        """
        try:
            # base_url = 'https://cq.esf.fang.com/housing/'
            base_url = 'https://bj.fang.com/'
            city_url = crawl_city_dict['city_url']
            city_url = urllib.parse.urljoin(base_url, city_url) if crawl_city_dict[
                                                                       'city_name'] != '北京' else 'https://bj.esf.fang.com/'
            assert city_url
            city_url = city_url.replace('http', 'https') if not city_url.startswith('https') else city_url
        except:
            return
        else:
            crawl_city_dict['city_url'] = city_url
            crawl_city_dict['esf_url'] = urllib.parse.urljoin(city_url, '/housing/{}')
            return crawl_city_dict

    def check_params(self, params) -> list:
        """
        Validate and normalize the configured city parameter into a list.

        Accepts either a delimited string ('重庆，北京') or a list of city
        names; every entry must be pure CJK text. On any validation failure
        the spider is closed and None is returned implicitly.

        :param params: raw cities parameter (str or list)
        :return: e.g. ['重庆', '成都', '武汉']
        """
        # Normalize common CJK/ASCII delimiters to an ASCII comma.
        regex_1 = re.compile('，|、|；|;')
        # Entries must consist solely of CJK unified ideographs.
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                try:
                    params = regex_1.sub(',', params).split(',')
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    # Every split part must survive validation.
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl fangtianxia -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                try:
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    # Every original entry must survive validation.
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                # Unsupported type: routed through the outer handler below.
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    @staticmethod
    def get_project_location(resp_str):
        """
        提取小区经纬度
        :param resp_str:
        :return:
        """
        try:
            mapx = float(re.findall(r'"mapx":"(\d+\.\d+?)"', resp_str)[0])
            mapy = float(re.findall(r'"mapy":"(\d+\.\d+?)"', resp_str)[0])
            assert mapx and mapy
        except:
            return
        else:
            return mapx, mapy

    @staticmethod
    def get_case_id(data_bg: str):
        """
        提取案例id
        :param data:
        :return:
        """
        try:
            data_bg_dict = json.loads(data_bg)
            case_id = data_bg_dict.get('houseid')
            assert case_id
        except:
            return
        else:
            return case_id

    @staticmethod
    def deal_case_info(case_info: list):
        """
        处理并提取案例信息
        :param case_info:
        :return:
        """
        case_info = [i.strip() for i in case_info if i and i.strip()]
        case_info_dict = dict()
        for case_str in case_info:
            if any(['室' in case_str, '厅' in case_str]):
                case_info_dict['roomType'] = case_str
            elif '面积' in case_str:
                case_info_dict['buildArea'] = case_str
            elif '层' in case_str:
                case_str = case_str.replace("（", "").replace("）", "")
                case_info_dict['totalFloor'] = case_str
            elif re.findall(r'[东南西北]', case_str):
                case_info_dict['directionType'] = case_str
            elif '年' in case_str:
                case_info_dict['buildYear'] = case_str
        return case_info_dict

    @staticmethod
    def deal_two_or_five(tag):
        """
        判断是否 满二 or 满五
        :param tag:
        :return:
        """
        try:
            assert tag
            if '满五' in tag:
                ret1 = True
                ret2 = True
            elif '满二' in tag:
                ret1 = False
                ret2 = True
            else:
                ret1 = False
                ret2 = False
        except:
            return False, False
        else:
            return ret1, ret2
