# -*- coding: utf-8 -*-
import scrapy

import logging
import re
from copy import copy, deepcopy
from HifoEsf.items import CommunityItem, CaseItem
import demjson
import json
import datetime

logger = logging.getLogger(__name__)


class BeikeCjSpider(scrapy.Spider):
    """Beike (ke.com) second-hand housing spider.

    Walks the national city index, then per city: community ("小区") list
    pages -> community detail (yields ``CommunityItem`` plus the sold cases
    embedded in the page) -> active listing cases (yields ``CaseItem``).
    Which cities are crawled or skipped is controlled by the ``crawl`` /
    ``filter`` spider arguments or the matching project settings.
    """
    name = 'beike_cj'
    allowed_domains = ['ke.com']
    start_urls = ['https://www.ke.com/city/']

    custom_settings = {
        'CONCURRENT_REQUESTS': 6,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            # 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,

            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,  # proxies from redis; RedisConnPipeline must be enabled too
        },
        'RETRY_TIMES': 6,
        'DOWNLOAD_DELAY': 0.8,
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        :param crawl: cities to crawl, e.g. ``-a crawl=重庆，北京``
        :param filter: cities to skip; only consulted when ``crawl`` is not given
        """
        # ``filter`` shadows the builtin, but the name is the public
        # ``-a filter=...`` CLI contract, so it must stay.
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """
        Pre-process the national city index page.

        Resolves the crawl/filter city configuration, then follows every
        matching city's second-hand-housing homepage.
        :param response: response for ``https://www.ke.com/city/``
        :param kwargs:
        :return: yields requests dispatched to :meth:`parse_xiaoqu_url`
        """
        try:
            # todo dedup check
            # assert self.filter_flag != False, f'>>>>{self.name}:案例id去重配置失败<<<<'
            # parameter validation; ``type`` is 1 (crawl list) or 2 (filter list)
            # NOTE: local ``type`` shadows the builtin within this method
            cities_li, type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # dedup check failed -> shut the spider down
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # parameter validation failed -> shut the spider down
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # validation passed: walk the city index
            if cities_li:
                headers = {'Referer': 'https://www.ke.com/'}
                # province (and municipality) groups under the domestic tab
                province_div_li = response.xpath("//div[contains(@data-action,'国内')]//div[@class='city_province']")
                for province_div in province_div_li:
                    province_name = province_div.xpath("./div/text()").extract_first().strip()
                    # cities grouped under this province
                    city_data_li = province_div.xpath("./ul/li")
                    for city_data in city_data_li:
                        city_name = city_data.xpath("./a/text()").extract_first()
                        city_url = city_data.xpath("./a/@href").extract_first()
                        # URLs containing 'fang' are new-construction links, not resale
                        if 'fang' not in city_url:
                            # crawl list: follow only listed cities ('全国' means all)
                            if type == 1:
                                if ('全国' in cities_li) or (city_name in cities_li):
                                    yield response.follow(
                                        city_url,
                                        headers=headers,
                                        callback=self.parse_xiaoqu_url,
                                        meta=dict(province_name=deepcopy(province_name),
                                                  city_name=deepcopy(city_name), ),
                                        dont_filter=True,
                                    )
                            # filter list: skip listed cities
                            if type == 2:
                                # NOTE(review): this break only exits the current
                                # province's city loop, not the whole walk — confirm intent
                                if '全国' in cities_li:
                                    break
                                elif city_name not in cities_li:
                                    yield response.follow(
                                        city_url,
                                        headers=headers,
                                        callback=self.parse_xiaoqu_url,
                                        meta=dict(province_name=deepcopy(province_name),
                                                  city_name=deepcopy(city_name), ),
                                        dont_filter=True,
                                    )
                        else:
                            logger.warning(f'{city_name} 没有二手房链接')
            else:
                logger.error('贝壳二手房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '贝壳二手房城市url列表匹配为空')

    def parse_xiaoqu_url(self, response):
        """
        Locate the community ("小区") channel link on a city homepage and
        request the first community list page (no district restriction).
        :param response: city homepage response
        :return: yields a request handled by :meth:`parse_xiaoqu_li`
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        xiaoqu_url = response.xpath("//div[@class='header']//a[contains(text(),'小区')]/@href").extract_first()
        # must be a resale community link on ke.com, not a new-construction one
        if xiaoqu_url and 'ke.com' in xiaoqu_url and 'fang' not in xiaoqu_url:
            # pagination template consumed by parse_xiaoqu_li
            next_page_url_temp = xiaoqu_url + 'pg{}cro22/'
            yield scrapy.Request(
                xiaoqu_url + 'cro22/',
                callback=self.parse_xiaoqu_li,
                meta=dict(province_name=deepcopy(province_name), city_name=deepcopy(city_name), district_name=None,
                          page_num=1, next_page_url_temp=next_page_url_temp),
                dont_filter=True,
            )
        else:
            logger.warning(f'{city_name} 没有小区链接')

    def parse_xiaoqu_li(self, response):
        """
        Parse one community ("小区") list page.

        ke.com exposes at most 100 pages (3000 rows) per listing, so when a
        city's total exceeds 3000 the crawl is re-issued per district.
        :param response: a community list page (``cro22`` sort, optional ``pg{n}``)
        :return: yields community-detail requests, then next-page / per-district requests
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        district_name = copy(response.meta['district_name'])
        page_num = copy(response.meta['page_num'])
        next_page_url_temp = copy(response.meta['next_page_url_temp'])
        xiaoqu_total_num = self.get_xiaoqu_toal_num(response)
        if isinstance(xiaoqu_total_num, int):
            # total within the 3000-row cap: no district split needed
            if 0 < xiaoqu_total_num <= 3000:
                xiaoqu_li = response.xpath("//ul[@class='listContent']/li")
                if len(xiaoqu_li):
                    for xiaoqu_obj in xiaoqu_li:
                        item_cmty = CommunityItem()
                        item_cmty['provinceName'] = province_name  # province
                        item_cmty['cityName'] = city_name  # city
                        item_cmty['name'] = xiaoqu_obj.xpath(".//div[@class='title']/a/@title").extract_first()  # community name
                        item_cmty['communityUrl'] = xiaoqu_obj.xpath(
                            ".//div[@class='title']/a/@href").extract_first()  # community url
                        item_cmty['districtName'] = xiaoqu_obj.xpath(
                            ".//div[@class='positionInfo']/a[@class='district']/text()").extract_first()  # district
                        item_cmty['shangQuan'] = xiaoqu_obj.xpath(
                            ".//div[@class='positionInfo']/a[@class='bizcircle']/text()").extract_first()  # business circle
                        item_cmty['buildYear'] = xiaoqu_obj.xpath(
                            ".//div[@class='positionInfo']/text()").extract()  # build year (raw; refined on detail page)
                        item_cmty['transportFacility'] = xiaoqu_obj.xpath(
                            ".//div[@class='tagList']/span/text()").extract_first()  # transport
                        item_cmty['price'] = xiaoqu_obj.xpath(
                            ".//div[@class='xiaoquListItemPrice']/div[@class='totalPrice']/span/text()").extract_first()  # unit price
                        item_cmty['other'] = xiaoqu_obj.xpath(".//div[@class='houseInfo']//text()").extract()  # misc info
                        # request the community detail page
                        yield scrapy.Request(
                            item_cmty['communityUrl'],
                            callback=self.parse_xiaoqu_detail,
                            meta=dict(item_cmty=deepcopy(item_cmty), ),
                            dont_filter=True,
                        )

                    # next page; an empty page terminates the chain below
                    next_page_num = page_num + 1
                    next_page_url = next_page_url_temp.format(next_page_num)
                    yield response.follow(
                        next_page_url,
                        callback=self.parse_xiaoqu_li,
                        meta=dict(province_name=deepcopy(province_name), city_name=deepcopy(city_name),
                                  district_name=deepcopy(district_name),
                                  next_page_url_temp=deepcopy(next_page_url_temp),
                                  page_num=next_page_num),
                        dont_filter=True,
                    )
                else:
                    # fix: use the module logger (was the root ``logging`` logger)
                    logger.warning('{}-{}-{}-第{}页 小区列表为空'.format(province_name, city_name, district_name, page_num))

            # total beyond 3000: split the crawl by district
            elif xiaoqu_total_num > 3000:
                if page_num == 1 and not district_name:
                    district_obj_li = response.xpath("//div[@data-role='ershoufang']/div/a")
                    for district_obj in district_obj_li:
                        district_name = district_obj.xpath("./text()").extract_first()
                        district_url = district_obj.xpath("./@href").extract_first()
                        district_url = district_url.replace('cro22', 'cro22/') if district_url.endswith(
                            'cro22') else district_url
                        next_page_url_temp = district_url.replace('cro22/', 'pg{}cro22/')
                        # NOTE(review): hardcoded whitelist of Chongqing districts —
                        # looks deployment-specific; confirm before crawling other cities
                        if district_name and '周边' not in district_name and any(
                                ['北碚' in district_name, '大渡口' in district_name, '九龙坡' in district_name,
                                 '沙坪坝' in district_name, '渝中' in district_name, '江北' in district_name,
                                 '渝北' in district_name, '南岸' in district_name, '巴南' in district_name, ]):
                            yield response.follow(
                                district_url,
                                callback=self.parse_xiaoqu_li,
                                meta=dict(province_name=deepcopy(province_name), city_name=deepcopy(city_name),
                                          district_name=deepcopy(district_name),
                                          next_page_url_temp=deepcopy(next_page_url_temp),
                                          page_num=1),
                                dont_filter=True,
                            )
                else:
                    # fix: module logger instead of root ``logging`` logger
                    logger.error('{}-{}-{} 小区（带行政区）总数超过3000，需分开（商圈）获取'.format(province_name, city_name, district_name))
            else:
                logger.warning('{}-{}-{} 小区总数为0'.format(province_name, city_name, district_name))
        else:
            logger.error('{}-{}-{} 小区总数提取出错'.format(province_name, city_name, district_name))

    def parse_xiaoqu_detail(self, response):
        """
        Parse a community detail page into the ``CommunityItem`` carried in
        meta, then fan out: request the community's active listing cases and
        emit the sold cases embedded in the page's GLOBAL_INFOS blob.
        :param response: community detail page
        :return: yields the CommunityItem, sold CaseItems, and a listing-list request
        """
        item_cmty = copy(response.meta['item_cmty'])
        xiaoqu_dict = self.get_xiaoqu_info(response)
        if xiaoqu_dict:
            others_dict = xiaoqu_dict.get('others', dict())
            father_others_dict = xiaoqu_dict.get('father_others', dict())
            # NOTE(review): default here is a list although get_field reads it
            # like a dict; get_field's blanket except turns that into None
            abstract_dict = father_others_dict.get('abstract', list())
            sold_list = father_others_dict.get('sold', list())
            item_cmty['guid'] = xiaoqu_dict.get('id', None)  # community id
            item_cmty['location'] = self.get_location(xiaoqu_dict.get('resblockPosition', None))  # lng/lat
            item_cmty['propertyFee'] = self.get_field(others_dict, abstract_dict, 'minTenement')  # property fee
            item_cmty['totalNumberOfBuildings'] = self.get_field(others_dict, abstract_dict, 'buildingCount')  # building count
            item_cmty['totalRoomCount'] = self.get_field(others_dict, abstract_dict, 'houseAmount')  # household count
            item_cmty['floorAreaRatio'] = self.get_field(others_dict, abstract_dict, 'curbageRate')  # plot ratio
            item_cmty['greeningRate'] = self.get_field(others_dict, abstract_dict, 'virescenceRate')  # greening rate
            item_cmty['address'] = self.get_field(others_dict, abstract_dict, 'address')  # address
            item_cmty['buildStructure'] = self.get_field(others_dict, abstract_dict, 'buildingType')  # building type
            item_cmty['buildYear'] = self.get_field(others_dict, abstract_dict, 'buildYear')  # build year (replaces the raw list-page value)
            item_cmty['propertyCompany'] = self.get_field(others_dict, abstract_dict, 'property')  # property management company
            item_cmty['developerName'] = self.get_field(others_dict, abstract_dict, 'developer')  # developer
            item_cmty['remarks'] = self.get_field(others_dict, abstract_dict, 'bdaTime')  # remarks
            yield item_cmty

            # request page 1 of the community's active ("挂牌") listing cases
            # NOTE(review): host hardcoded to the Chongqing subdomain (cq.ke.com);
            # presumably intentional for this deployment — confirm for other cities
            esf_cmty_url = 'https://cq.ke.com/ershoufang/{}.html'
            esf_case_li_url = 'https://cq.ke.com/ershoufang/co32c{}/'
            headers = {
                'Referer': esf_cmty_url.format(item_cmty['guid']),
            }
            if item_cmty['guid']:
                yield scrapy.Request(
                    esf_case_li_url.format(item_cmty['guid']),
                    headers=headers,
                    callback=self.parse_esf_case_li,
                    meta=dict(item_cmty=deepcopy(item_cmty), page_num=1),
                    dont_filter=True,
                )

            # emit the sold ("成交") cases embedded in the page data
            if sold_list:
                for sold_dict in sold_list:
                    item_case = CaseItem()
                    item_case['provinceName'] = item_cmty['provinceName']
                    item_case['cityName'] = item_cmty['cityName']
                    item_case['districtName'] = item_cmty['districtName']
                    item_case['name'] = item_cmty['name']
                    item_case['guid'] = item_cmty['guid']  # community id
                    item_case['caseType'] = '贝壳成交'  # case type (Beike sold)
                    item_case['dataUrl'] = sold_dict.get('viewUrl', None)  # case url
                    item_case['caseId'] = self.get_case_id(item_case['dataUrl'])  # case id
                    item_case['roomType'] = sold_dict.get('jushi', None)  # room layout
                    item_case['buildArea'] = sold_dict.get('area', None)  # build area
                    item_case['totalFloor'] = sold_dict.get('totalFloor', None)  # total floors
                    item_case['buildYear'] = sold_dict.get('buildYear', None)  # build year
                    item_case['price'] = sold_dict.get('unitPrice', None)  # unit price
                    item_case['totalPrice'] = sold_dict.get('price', None)  # total price
                    item_case['other'] = sold_dict.get('signTime', None)  # deal date
                    item_case['listingDate'] = sold_dict.get('signTime', None)
                    yield item_case

            else:
                logger.warning('{}-{}-{}-{} 小区成交案例为空'.format(item_cmty['provinceName'], item_cmty['cityName'],
                                                             item_cmty['districtName'], item_cmty['name']))
        else:
            logger.error('{}-{}-{}-{} 小区详情信息提取出错'.format(item_cmty['provinceName'], item_cmty['cityName'],
                                                         item_cmty['districtName'], item_cmty['name']))

    def parse_esf_case_li(self, response):
        """
        Parse one page of a community's active ("挂牌") listing cases.
        :param response: a ``co32c{community_id}`` listing page
        :return: yields case-detail requests, then a request for the next list page
        """
        item_cmty = copy(response.meta['item_cmty'])
        page_num = copy(response.meta['page_num'])
        # the page shows a "小区暂无..." placeholder when there are no listings
        flag = response.xpath("//p[contains(text(),'小区暂无')]/text()").extract_first()
        listing_case_li = response.xpath("//ul[@class='sellListContent']//li[@class='clear']")
        if (not flag) and len(listing_case_li):
            for listing_case in listing_case_li:
                item_case = CaseItem()
                item_case['provinceName'] = item_cmty['provinceName']
                item_case['cityName'] = item_cmty['cityName']
                item_case['districtName'] = item_cmty['districtName']
                item_case['shangQuan'] = item_cmty['shangQuan']
                item_case['name'] = item_cmty['name']
                item_case['guid'] = item_cmty['guid']  # community id
                item_case['caseType'] = '贝壳挂牌'  # case type (Beike listing)
                item_case['title'] = listing_case.xpath("./div/div[1]/a/@title").extract_first()  # title
                item_case['dataUrl'] = listing_case.xpath("./div/div[1]/a/@href").extract_first()  # case url
                item_case['caseId'] = self.get_case_id(item_case['dataUrl'])  # case id
                # the icon image, the tagBlock spans and the tag div all carry labels
                tag_icon = listing_case.xpath("./div/div[@class='title']/img[@class='tagIcon']/@src").extract_first()
                tag_icon = ['好赞好房'] if tag_icon else list()
                tag_block = listing_case.xpath("./div/div/span[contains(@class,'tagBlock')]/text()").extract()
                item_case['tag'] = tag_icon + tag_block + listing_case.xpath(
                    "./div/div/div[@class='tag']/span/text()").extract()  # tags
                item_case['tag'] = list(set(item_case['tag']))
                cont_info = listing_case.xpath("./div/div/div[@class='houseInfo']/text()").extract()
                cont_dict = self.get_room_cont_info(cont_info)
                item_case['buildArea'] = cont_dict.get('buildArea', None)  # build area
                item_case['directionType'] = cont_dict.get('directionType', None)  # orientation
                item_case['floor'] = cont_dict.get('floor', None)  # floor band
                item_case['totalFloor'] = cont_dict.get('totalFloor', None)  # total floors
                item_case['buildYear'] = cont_dict.get('buildYear', None)  # build year
                item_case['roomType'] = cont_dict.get('roomType', None)  # room layout

                # request the listing's detail page
                if item_case['dataUrl']:
                    yield scrapy.Request(
                        item_case['dataUrl'],
                        callback=self.parse_esf_case_detail,
                        meta=dict(item_case=deepcopy(item_case), ),
                    )

            # pagination: total page count is embedded as JSON in the @page-data
            # attribute; guard against it being absent (previously a TypeError)
            page_data = response.xpath("//div[@page-data]/@page-data").extract_first()
            total_page_num = json.loads(page_data).get('totalPage', 0) if page_data else 0
            next_page_num = page_num + 1
            if next_page_num <= total_page_num:
                # NOTE(review): host hardcoded to cq.ke.com, matching the list
                # request built in parse_xiaoqu_detail — confirm for other cities
                next_page_url = f'https://cq.ke.com/ershoufang/pg{next_page_num}co32c{item_cmty["guid"]}/'
                yield scrapy.Request(
                    next_page_url,
                    callback=self.parse_esf_case_li,
                    meta=dict(item_cmty=deepcopy(item_cmty), page_num=deepcopy(next_page_num)),
                    dont_filter=True,
                )
        else:
            # fix: use the module logger (was the root ``logging`` logger)
            logger.warning(
                '{}-{}-{}-第{}页 挂牌案例列表为空'.format(item_cmty['cityName'], item_cmty['districtName'], item_cmty['name'],
                                                page_num))

    def parse_esf_case_detail(self, response):
        """
        Parse a listing-case detail page into the ``CaseItem`` carried in meta.
        :param response: listing ("ershoufang") detail page
        :return: yields the completed CaseItem
        """
        item_case = copy(response.meta['item_case'])

        def info_field(label):
            # Every basic-info row is "<span>label</span> value": join the text
            # nodes that are siblings of the labelled span and strip whitespace.
            return ''.join(
                response.xpath(f"//span[contains(text(),'{label}')]/../text()").extract()).strip()

        item_case['totalPrice'] = response.xpath("//span[@class='total']/text()").extract_first()  # total price
        item_case['price'] = response.xpath("//span[@class='unitPriceValue']/text()").extract_first()  # unit price
        item_case['roomType'] = info_field('房屋户型')  # room layout
        item_case['buildType'] = info_field('建筑类型')  # building type
        item_case['floor'] = info_field('所在楼层')  # floor band
        item_case['isDuplex'] = info_field('户型结构')  # duplex or not
        item_case['innerArea'] = info_field('套内面积')  # inner area
        item_case['buildStructure'] = info_field('建筑结构')  # build structure
        item_case['decorationType'] = info_field('装修情况')  # decoration
        item_case['elevatorRatio'] = info_field('梯户比例')  # elevator/household ratio
        item_case['isElevator'] = info_field('配备电梯')  # has elevator
        item_case['listingDate'] = info_field('挂牌时间')  # listing date
        if item_case['listingDate']:
            # normalise "2020年1月2日" to ISO "2020-01-02"
            item_case['listingDate'] = datetime.datetime.strptime(item_case['listingDate'],
                                                                  '%Y年%m月%d日').date().strftime('%Y-%m-%d')
        item_case['property'] = info_field('交易权属')  # transaction ownership
        item_case['calQuality'] = info_field('房屋用途')  # usage category
        year_str = info_field('房屋年限')
        item_case['isFiveYear'], item_case['isTwoYear'] = self.deal_two_or_five(year_str)  # owned >=5y / >=2y
        item_case['propertyDescription'] = info_field('产权所属')  # property-right ownership
        item_case['other'] = [i.strip() for i in
                              response.xpath("//div[@class='introContent showbasemore']//text()").extract() if
                              i and i.strip()]  # misc info
        yield item_case

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        获取配置参数
        :return: (['重庆', '成都', '武汉'], 1)
        """
        param_1 = getattr(self, 'CRAWL_CITIES', None)
        param_2 = getattr(self, 'FILTER_CITIES', None)
        param_3 = self.settings.get('CRAWL_CITIES', None)
        param_4 = self.settings.get('FILTER_CITIES', None)
        if param_1 or param_2:
            if param_1:
                return self.check_params(param_1), 1  # 1代表配置参数是爬取列表
            else:
                return self.check_params(param_2), 2  # 2代表配置参数是过滤列表
        elif param_3 or param_4:
            if param_3:
                return self.check_params(param_3), 1
            else:
                return self.check_params(param_4), 2
        else:
            return ['全国'], 1

    @staticmethod
    def check_params(params) -> list:
        """
        爬取城市参数校验和处理
        :param params:
        :return: ['重庆', '成都', '武汉']
        """
        regex_1 = re.compile('，|、|；|;')
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        if isinstance(params, str):
            try:
                params_li = regex_1.sub(',', params).split(',')
                ret_li = [param.strip() for param in params_li if
                          isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                assert ret_li and len(ret_li) == len(params_li)
            except:
                logger.error('参数输入错误，请重新输入，例如： scrapy crawl beike_cj -a crawl=重庆，北京')
                raise AssertionError(f'错误参数>>>>>>{params}')
            else:
                return ret_li
        elif isinstance(params, list):
            try:
                ret_li = [param.strip() for param in params if
                          isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                assert ret_li and len(ret_li) == len(params)
            except:
                logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                raise AssertionError(f'错误参数>>>>>>{params}')
            else:
                return ret_li
        else:
            raise AssertionError(f'错误参数>>>>>>{params}')

    @staticmethod
    def get_xiaoqu_toal_num(response):
        """
        Read the total community count from the "共找到 N" page header.
        (The method name keeps the original "toal" typo — callers use it.)
        :param response: community list page
        :return: the count as int, or None when missing/non-numeric
        """
        try:
            raw = response.xpath("//h2[contains(text(),'共找到')]/span/text()").extract_first()
            return int(raw)
        except:
            # header absent or not a number
            return None

    @staticmethod
    def get_xiaoqu_info(response):
        """
        Pull the community detail dict out of the page's inline
        ``window.GLOBAL_INFOS`` script block.
        :param response: community detail page
        :return: decoded dict, or None when the blob is missing/unparseable
        """
        try:
            html = response.body.decode()
            blob = re.findall(r'window.GLOBAL_INFOS.*?({.*?})\n</script>', html, re.S)[0]
            # demjson tolerates the non-strict JS-object-literal syntax here
            return demjson.decode(blob)
        except Exception as e:
            logger.error(e)
            return None

    @staticmethod
    def get_location(location):
        """
        提取小区经纬度
        :param location:
        :return:
        """
        try:
            assert location
            location = tuple([float(i) for i in location.split(',')])
        except:
            return
        else:
            return location

    @staticmethod
    def get_field(dict1, dict2, key):
        """
        提取字段信息
        :param dict1:
        :param dict2:
        :param key:
        :return:
        """
        try:
            ret = dict1.get(key, None) if dict1.get(key, None) else dict2.get(key, None)
            assert ret
        except:
            return
        else:
            return ret

    @staticmethod
    def get_case_id(case_url: str):
        """
        提取案例id
        :param case_url: https://cq.ke.com/chengjiao/106110927069.html
        :return:
        """
        try:
            if 'chengjiao' in case_url:
                case_id = case_url.split('chengjiao/')[1].split('.html')[0]
            else:
                case_id = case_url.split('ershoufang/')[1].split('.html')[0]
            assert case_id
        except Exception as e:
            logger.error('{} 案例id提取出错，error:{}'.format(case_url, e))
            return
        else:
            return case_id

    def get_room_cont_info(self, cont_info: list):
        """
        提取案例部分字段信息（建筑面积、朝向、所在楼层区、总楼层、建筑年代、户型）
        :param cont_info:
        :return:
        """
        cont_info = ''.join(cont_info).split('|')
        ret_dict = dict()
        try:
            ret_li = [''.join(i.split()) for i in cont_info]
            for ret in ret_li:
                if ('㎡' in ret) or ('平米' in ret):
                    ret_dict['buildArea'] = ret
                elif len(re.findall(r'[东南西北]', ret)):
                    ret_dict['directionType'] = ret
                elif '层' in ret:
                    ret_dict['floor'], ret_dict['totalFloor'] = self.deal_floor(ret)
                elif '年' in ret:
                    ret_dict['buildYear'] = ret
                elif any(['室' in ret, '厅' in ret, '卫' in ret]):
                    ret_dict['roomType'] = ret
        except Exception as e:
            logger.error(e)
            return ret_dict
        else:
            return ret_dict

    @staticmethod
    def deal_floor(floor: str):
        """
        处理楼层
        :param floor:
        :return:
        """
        try:
            total_floor = re.findall(r'共(.*?)层', floor)[0]
        except:
            total_floor = None
        return floor, total_floor

    @staticmethod
    def deal_two_or_five(year: str):
        """
        判断是否 满二 or 满五
        :param year:
        :return:
        """
        try:
            assert year
            if '满五' in year:
                ret1 = True
                ret2 = True
            elif '满二' in year:
                ret1 = False
                ret2 = True
            else:
                ret1 = False
                ret2 = False
        except:
            return False, False
        else:
            return ret1, ret2
