# -*- coding: utf-8 -*-
import scrapy

import re
import logging
import pandas as pd
from HifoFzz.utils.anjuke_cities import ANJUKE_CITY_LIST
from copy import copy, deepcopy
from scrapy_pyppeteer.request import PyppeteerRequest
from HifoFzz.items import (
    ShopsItem,
    OfficeItem,
    WarehouseItem,
    WorkshopItem,
    LandItem,
    ParkingItem
)

logger = logging.getLogger(__name__)


class AnjukeSpider(scrapy.Spider):
    """Spider for Anjuke non-residential listings (shops, offices, warehouses,
    workshops, land and parking lots) across Chinese cities."""
    name = 'anjuke'
    allowed_domains = ['anjuke.com']
    # Entry page that lists every city covered by Anjuke.
    start_urls = ['https://www.anjuke.com/sy-city.html']

    custom_settings = {
        'CONCURRENT_REQUESTS': 3,
        # 'DOWNLOAD_DELAY': 1,
        'DOWNLOADER_MIDDLEWARES': {
            # Disable the stock retry middleware and use the project's own.
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoFzz.middlewares.CustomRetryMiddleware': 500,
            # 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,

            'HifoFzz.middlewares.UserAgentMiddleware': 544,
            'HifoFzz.middlewares.OuterNetProxyMiddleware': 545,  # use proxies stored in redis; requires RedisConnPipeline to be enabled as well
            'scrapy_pyppeteer.downloadermiddlewares.PyppeteerMiddleware': 566,
        },
        'DEFAULT_REQUEST_HEADERS': {
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'accept-encoding': 'gzip, deflate, br',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        },
        'GERAPY_PYPPETEER_HEADLESS': True,
        'GERAPY_ENABLE_REQUEST_INTERCEPTION': True,
        'GERAPY_PYPPETEER_DEVTOOLS': False,
        'GERAPY_PYPPETEER_DUMPIO': True,
        'COOKIES_ENABLED': False,  # do not carry cookies between requests
        'CASE_ID_FILTER': True,  # whether to de-duplicate cases by case id (via redis)
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        Store the crawl/filter city arguments passed on the command line.

        :param crawl: cities to crawl (raw argument; validated later in parse()).
        :param filter: cities to exclude (raw argument; validated later in parse()).
        """
        super().__init__(*args, **kwargs)
        # Raw arguments are kept as-is; get_crawl_or_filter_cities() interprets them.
        self.FILTER_CITIES = filter
        self.CRAWL_CITIES = crawl

    def parse(self, response, **kwargs):
        """
        Pre-process the list of cities to crawl.

        Validates the duplicate-filter configuration and the crawl/filter
        arguments, builds the city list (crawl mode keeps the listed cities,
        filter mode excludes them; '全国' means "all of China"), then yields
        the per-city non-residential requests via ``create_city_request``.

        :param response: response of the start url (its content is unused).
        :param kwargs: unused extra keyword arguments.
        :return: generator of scrapy.Request objects.
        """
        try:
            # Duplicate-filter check (AssertionError is deliberately used as
            # control flow and caught below to close the spider).
            assert self.filter_flag != False, f'>>>>{self.name}:案例id去重配置失败<<<<'
            # Argument check: returns (city list, 1 for crawl-list / other for filter-list).
            # Renamed from ``type`` to avoid shadowing the builtin.
            cities_li, crawl_type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # De-dup check failed: close the spider.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Argument validation failed: close the spider.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # Guard clause instead of the original deep if/else nesting.
            if not cities_li:
                logger.error('安居客非住宅城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '安居客非住宅城市url列表匹配为空')
                return

            cities_df = pd.DataFrame(ANJUKE_CITY_LIST)
            # Crawl list: keep only the requested cities.
            if crawl_type == 1:
                if '全国' in cities_li:
                    # "All of China": take every known city.
                    crawl_city_li = cities_df.to_dict(orient='records')
                else:
                    crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                    # Warn about requested cities with no non-residential url
                    # (plain loop instead of a side-effect conditional expression).
                    crawl_cities = {d['city_name'] for d in crawl_city_li}
                    for city_name in cities_li:
                        if city_name not in crawl_cities:
                            logger.warning('{} 非住宅url为空'.format(city_name))
            # Filter list: exclude the listed cities.
            else:
                if '全国' in cities_li:
                    # Filtering out the whole country leaves nothing to crawl.
                    crawl_city_li = list()
                    self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                else:
                    crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')

            # Build the per-city non-residential requests.
            # TODO: add a filter so that boards (shop/office/workshop/warehouse/
            #       land/parking) can be crawled selectively.
            for fzz_request in self.create_city_request(crawl_city_li):
                yield fzz_request
            del cities_df

    def create_city_request(self, crawl_city_li):
        """
        Build the non-residential list-page requests for every city.

        For each city, one request per district is yielded for the shop (sp),
        office (xzl), warehouse (ck) and workshop (cf) boards; land (td) and
        parking (cw) have little data and are requested once per city without
        splitting by district. Request order matches the original spider:
        all district boards for every district, then the city-wide boards.

        Fixes over the previous version:
        - the empty-list check no longer uses assert + bare ``except:``;
        - when url normalisation fails, the error log reads the city name from
          the ORIGINAL dict (previously it indexed the falsy normalised value,
          which would raise TypeError/KeyError inside the generator).

        :param crawl_city_li: list of city dicts carrying url-building info.
        :return: generator of scrapy.Request objects.
        """
        if not crawl_city_li:
            # Nothing to crawl: close the spider (same reason string as before).
            self.crawler.engine.close_spider(self, '安居客非住宅城市爬取列表为空')
            return

        # (url key, list-page callback, category label) for boards requested
        # once per district. Order is significant and mirrors the original.
        district_boards = [
            ('sp_zu', self.parse_sp_li, '商铺出租'),
            ('sp_shou', self.parse_sp_li, '商铺出售'),
            ('xzl_zu', self.parse_xzl_li, '写字楼出租'),
            ('xzl_shou', self.parse_xzl_li, '写字楼出售'),
            ('ck_zu', self.parse_ck_li, '仓库出租'),
            ('ck_shou', self.parse_ck_li, '仓库出售'),
            ('ck_zhuan', self.parse_ck_li, '仓库转让'),
            ('cf_zu', self.parse_cf_li, '厂房出租'),
            ('cf_shou', self.parse_cf_li, '厂房出售'),
            ('cf_zhuan', self.parse_cf_li, '厂房转让'),
        ]
        # Boards requested once per city (no district split): land and parking.
        city_boards = [
            ('td_zu', self.parse_td_li, '土地出租'),
            ('td_shou', self.parse_td_li, '土地出售'),
            ('td_zhuan', self.parse_td_li, '土地转让'),
            ('cw_zu', self.parse_cw_li, '车位出租'),
            ('cw_shou', self.parse_cw_li, '车位出售'),
            ('cw_zhuan', self.parse_cw_li, '车位转让'),
        ]

        for crawl_city_dict in crawl_city_li:
            # Normalise the city's non-residential urls; keep the original
            # dict so the failure log can still read the city name.
            formatted = self.format_crawl_city_dict(crawl_city_dict)
            if not formatted:
                logger.error('{} 城市非住宅url无法标准化'.format(crawl_city_dict['city_name']))
                continue

            province_name = formatted['province_name']
            city_name = formatted['city_name']
            # One entry per district/county: {district_name: district_data}.
            for district_dict in formatted['district_data_li']:
                district_name, district_data = list(district_dict.items())[0]
                for url_key, callback, category in district_boards:
                    base_url = formatted[url_key]
                    # 'px1' = sort order, 'pv2' = page size, same as before.
                    yield scrapy.Request(
                        base_url + f'{district_data}-px1-pv2/',
                        headers={'referer': base_url},
                        callback=callback,
                        meta=dict(city_name=city_name, province_name=province_name,
                                  district_name=district_name,
                                  page_num=1, category=category,
                                  dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                        dont_filter=True,
                    )

            # City-wide boards (no district_name in meta, matching the original).
            for url_key, callback, category in city_boards:
                base_url = formatted[url_key]
                yield scrapy.Request(
                    base_url + 'px1-pv2/',
                    headers={'referer': base_url},
                    callback=callback,
                    meta=dict(city_name=city_name, province_name=province_name,
                              page_num=1, category=category,
                              dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                    dont_filter=True,
                )
    def parse_sp_li(self, response):
        """
        Parse one page of the shop case list.

        For every case on the page a ShopsItem is pre-filled from the list
        entry and a PyppeteerRequest for the detail page is yielded (subject
        to case-id de-duplication via redis), then the "next page" link is
        followed with the same meta.

        :param response: list-page response; meta carries province_name,
            city_name, district_name, page_num and category.
        :return: generator of detail-page and next-page requests.
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        district_name = copy(response.meta['district_name'])
        page_num = copy(response.meta['page_num'])
        category = copy(response.meta['category'])
        case_div_li = response.xpath("//div[@class='list-left']/div[@class='list-item']")
        if case_div_li:
            for case_div in case_div_li:
                item_sp = ShopsItem()
                item_sp['provinceName'] = province_name  # province
                item_sp['cityName'] = city_name  # city
                item_sp['category'] = category  # category (e.g. shop-for-rent)
                item_sp['caseUrl'] = case_div.xpath("./a/@href").extract_first()  # case url
                item_sp['title'] = case_div.xpath("./a//span[@class='title']/text()").extract_first()  # case title
                item_sp['districtName'] = case_div.xpath(
                    ".//div[@class='item-info']/p[1]/span[1]/text()").extract_first()  # district
                item_sp['shangQuan'] = case_div.xpath(
                    ".//div[@class='item-info']/p[1]/span[2]/text()").extract_first()  # business circle
                item_sp['operationState'] = case_div.xpath(
                    ".//div[@class='item-info']/p[1]/span[5]/text()").extract_first()  # operating state
                item_sp['propertyType'] = case_div.xpath(
                    ".//div[@class='item-info']/p[2]/span[1]/text()").extract_first()  # property type
                item_sp['floorLevels'] = case_div.xpath(
                    ".//div[@class='item-info']/p[2]/span[2]/text()").extract_first()  # floor of the case
                item_sp['buildArea'] = case_div.xpath(
                    ".//div[@class='item-area']/p[@class='area']/span/text()").extract()  # building area
                item_sp['other'] = case_div.xpath(".//p[@class='item-tag']/span/text()").extract()  # other tags
                item_sp['isStreet'] = self.is_street(item_sp['propertyType'], item_sp['other'])  # street-facing or not
                # shop-for-rent case
                if 'sp-zu' in response.request.url:
                    item_sp['monthlyRental'] = case_div.xpath(
                        ".//div[@class='price-monthly']/span/text()").extract()  # monthly rent
                    item_sp['dailyRental'] = case_div.xpath(".//div[@class='price-daily']/span/text()").extract()  # daily rent
                # shop-for-sale case
                elif 'sp-shou' in response.request.url:
                    item_sp['totalPrice'] = case_div.xpath(
                        ".//div[@class='price-monthly']/span/text()").extract()  # total price
                    item_sp['price'] = case_div.xpath(
                        ".//div[@class='price-daily']/span[position()>1]/text()").extract()  # unit price
                # any other case type: not supported
                else:
                    logger.warning('{} 暂不支持该类型'.format(response.request.url))
                item_sp['caseId'] = self.get_case_id(item_sp['caseUrl'])  # case id
                # fetch the shop case detail page
                if item_sp['caseId']:
                    # De-duplicate by case id. NOTE the short-circuit is
                    # load-bearing: redis sadd() (which both tests AND records
                    # the id) only runs when CASE_ID_FILTER is enabled.
                    if (self.settings['CASE_ID_FILTER'] and self.redis_conn.sadd('anjuke_case_id', item_sp['caseId'])) \
                            or (not self.settings['CASE_ID_FILTER']):
                        yield PyppeteerRequest(
                            item_sp['caseUrl'],
                            callback=self.parse_sp_detail,
                            pretend=True,
                            wait_for='span.content',
                            meta=dict(item_sp=deepcopy(item_sp), ),
                            ignore_resource_types=['image', 'media'],
                        )
                else:
                    logger.error('{}-{}-{} 案例id、案例url提取出错'.format(city_name, category, item_sp['title']))
        else:
            logger.warning('{}-{}-{}-第{}页 案例列表提取为空'.format(city_name, district_name, category, page_num))

        # Pagination: follow the "下一页" (next page) link.
        next_page_url = response.xpath("//span[contains(text(),'下一页')]/../@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield scrapy.Request(
                next_page_url,
                callback=self.parse_sp_li,
                meta=dict(city_name=city_name, province_name=province_name, district_name=district_name,
                          page_num=next_page_num, category=category,
                          dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-{}-第{}页 提取翻页请求url为空'.format(city_name, district_name, category, next_page_num))

    def parse_sp_detail(self, response):
        """
        Parse the shop case detail page and emit the completed item.

        Completes the ShopsItem carried in ``response.meta['item_sp']`` with
        the detail-page fields, then yields it.

        :param response: detail-page response.
        :return: generator yielding one ShopsItem.
        """
        item_sp = copy(response.meta['item_sp'])

        # Labelled fields in the "basic info" panel: item key -> label text.
        basic_labels = {
            'shopNature': '商铺性质',      # nature of the shop
            'paymentMethod': '押付',       # deposit/payment scheme
            'minimumLeaseTerm': '起租期',  # minimum lease term
            'remainingLeaseTerm': '剩余租期',  # remaining lease term
            'name': '物业',                # estate name
            'address': '地址',             # estate address
            'customer': '客流人群',        # customer traffic profile
            'specs': '规格',               # specs (width/depth/floor height)
        }
        for key, label in basic_labels.items():
            item_sp[key] = response.xpath(
                f"//div[@id='basic-info']//span[contains(text(),'{label}')]/following-sibling::span[1]/text()"
            ).extract_first()
        # Transfer fee uses an exact-text match (not contains), so it is kept apart.
        item_sp['transferFee'] = response.xpath(
            "//div[@id='basic-info']//span[text()='转让费']/following-sibling::span[1]/text()").extract_first()

        # Free-form description sections: item key -> section heading.
        article_headings = {
            'houseAdvantages': '房源亮点',       # listing highlights
            'surroundingCustomer': '周边客流',   # surrounding foot traffic
            'businessFormat': '适合行业',        # suitable businesses
            'serviceIntroduction': '服务介绍',   # service introduction
        }
        for key, heading in article_headings.items():
            item_sp[key] = response.xpath(
                f"//p[contains(text(),'{heading}')]/following-sibling::article/text()").extract()

        # Building-level facts: item key -> label text.
        building_labels = {
            'grossBuildArea': '总建面',      # gross building area
            'occupancyRate': '得房率',       # usable-area ratio
            'completionDate': '竣工时间',    # completion date
            'propertyYears': '产权年限',     # property tenure (years)
            'propertyCompany': '物业公司',   # property-management company
        }
        for key, label in building_labels.items():
            item_sp[key] = response.xpath(
                f"//span[contains(text(),'{label}')]/following-sibling::span/text()").extract_first()

        item_sp['listingDate'] = response.xpath("//div[@class='time']/text()").extract_first()  # listing date
        item_sp['support'] = response.xpath("//ul[@class='peitao-content']/li[not(@class)]/p/text()").extract()  # amenities

        # Derived fields: width/depth/floor height come from the specs string.
        if item_sp['specs']:
            item_sp['width'], item_sp['depth'], item_sp['floorHeight'] = \
                self.get_width_and_depth_and_floor_height(item_sp['specs'])
        else:
            item_sp['width'], item_sp['depth'], item_sp['floorHeight'] = None, None, None
        floor_text = response.xpath("//span[contains(text(),'楼层')]/following-sibling::span[1]/text()").extract_first()
        item_sp['totalFloor'] = self.get_total_floor(floor_text)  # total floor count
        item_sp['lng'], item_sp['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude / latitude
        yield item_sp

    def parse_xzl_li(self, response):
        """
        Parse one page of the office (写字楼) case list.

        For every case on the page an OfficeItem is pre-filled from the list
        entry and a PyppeteerRequest for the detail page is yielded (subject
        to case-id de-duplication via redis), then the "next page" link is
        followed with the same meta.

        :param response: list-page response; meta carries province_name,
            city_name, district_name, page_num and category.
        :return: generator of detail-page and next-page requests.
        """
        province_name = copy(response.meta['province_name'])
        city_name = copy(response.meta['city_name'])
        district_name = copy(response.meta['district_name'])
        page_num = copy(response.meta['page_num'])
        category = copy(response.meta['category'])
        case_div_li = response.xpath("//div[@class='list-left']/div[@class='list-item']")
        if case_div_li:
            for case_div in case_div_li:
                item_xzl = OfficeItem()
                item_xzl['provinceName'] = province_name  # province
                item_xzl['cityName'] = city_name  # city
                item_xzl['category'] = category  # category (e.g. office-for-rent)
                item_xzl['caseUrl'] = case_div.xpath("./a/@href").extract_first()  # case url
                item_xzl['title'] = case_div.xpath("./a//span[@class='title']/text()").extract_first()  # case title
                item_xzl['districtName'] = case_div.xpath(
                    ".//div[@class='item-info']/p[1]/span[1]/text()").extract_first()  # district
                item_xzl['locationArea'] = case_div.xpath(
                    ".//div[@class='item-info']/p[1]/span[2]/text()").extract_first()  # area the office belongs to
                item_xzl['officeType'] = case_div.xpath(
                    ".//div[@class='item-info']/p[2]/span[1]/text()").extract_first()  # office type
                item_xzl['floorType'] = case_div.xpath(
                    ".//div[@class='item-info']/p[2]/span[2]/text()").extract_first()  # floor type
                item_xzl['buildArea'] = case_div.xpath(
                    ".//div[@class='item-area']/p[@class='area']/span/text()").extract()  # building area
                # office-for-rent case
                if 'xzl-zu' in response.request.url:
                    # NOTE(review): monthlyRental reads 'price-daily' and
                    # dailyRental reads 'price-monthly' — swapped relative to
                    # parse_sp_li. Possibly the office board's markup differs,
                    # but this looks like a copy-paste slip; verify against a
                    # live xzl-zu list page before changing.
                    item_xzl['monthlyRental'] = case_div.xpath(
                        ".//div[@class='price-daily']/span/text()").extract()  # monthly rent
                    item_xzl['dailyRental'] = case_div.xpath(
                        ".//div[@class='price-monthly']/span/text()").extract()  # daily rent
                # office-for-sale case
                elif 'xzl-shou' in response.request.url:
                    item_xzl['totalPrice'] = case_div.xpath(
                        ".//div[@class='price-monthly']/span/text()").extract()  # total price
                    item_xzl['price'] = case_div.xpath(
                        ".//div[@class='price-daily']/span[position()>1]/text()").extract()  # unit price
                # any other case type: not supported
                else:
                    logger.warning('{} 暂不支持该类型'.format(response.request.url))
                item_xzl['caseId'] = self.get_case_id(item_xzl['caseUrl'])  # case id
                # fetch the office case detail page
                if item_xzl['caseId']:
                    # De-duplicate by case id. NOTE the short-circuit is
                    # load-bearing: redis sadd() (which both tests AND records
                    # the id) only runs when CASE_ID_FILTER is enabled.
                    if (self.settings['CASE_ID_FILTER'] and self.redis_conn.sadd('anjuke_case_id', item_xzl['caseId'])) \
                            or (not self.settings['CASE_ID_FILTER']):
                        yield PyppeteerRequest(
                            item_xzl['caseUrl'],
                            callback=self.parse_xzl_detail,
                            pretend=True,
                            wait_for='span.content',
                            meta=dict(item_xzl=deepcopy(item_xzl), ),
                            ignore_resource_types=['image', 'media'],
                        )
                else:
                    logger.error('{}-{}-{} 案例id、案例url提取出错'.format(city_name, category, item_xzl['title']))
        else:
            logger.warning('{}-{}-{}-第{}页 案例列表提取为空'.format(city_name, district_name, category, page_num))

        # Pagination: follow the "下一页" (next page) link.
        next_page_url = response.xpath("//span[contains(text(),'下一页')]/../@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield scrapy.Request(
                next_page_url,
                callback=self.parse_xzl_li,
                meta=dict(city_name=city_name, province_name=province_name, district_name=district_name,
                          page_num=next_page_num, category=category,
                          dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-{}-第{}页 提取翻页请求url为空'.format(city_name, district_name, category, next_page_num))

    def parse_xzl_detail(self, response):
        """
        Parse an office-building (写字楼) case detail page.

        :param response: detail page response; ``response.meta['item_xzl']``
            carries the item partially filled on the list page
        :return: yields the completed office item
        """
        def first(query):
            # Shorthand: first matching text for an xpath query.
            return response.xpath(query).extract_first()

        def many(query):
            # Shorthand: all matching texts for an xpath query.
            return response.xpath(query).extract()

        item = copy(response.meta['item_xzl'])
        item['listingDate'] = first("//div[@class='time']/text()")  # listing date
        item['projectUrl'] = first("//a[@class='loupanDetail']/@href")  # project url
        # Label/value pairs inside the #basic-info panel.
        item['officeNature'] = first(
            "//div[@id='basic-info']//span[contains(text(),'性质')]/following-sibling::span[1]/text()")  # office nature
        item['paymentMethod'] = first(
            "//div[@id='basic-info']//span[contains(text(),'付款')]/following-sibling::span[1]/text()")  # deposit / payment terms
        item['minimumLeaseTerm'] = first(
            "//div[@id='basic-info']//span[contains(text(),'起租期')]/following-sibling::span[1]/text()")  # minimum lease term
        item['decorationState'] = first(
            "//div[@id='basic-info']//span[contains(text(),'装修')]/following-sibling::span[1]/text()")  # decoration state
        item['utilization'] = first(
            "//div[@id='basic-info']//span[contains(text(),'使用率')]/following-sibling::span[1]/text()")  # utilization rate
        item['workingSeat'] = first(
            "//div[@id='basic-info']//span[contains(text(),'工位数')]/following-sibling::span[1]/text()")  # number of workstations
        item['name'] = first(
            "//div[@id='basic-info']//span[contains(text(),'楼盘')]/following-sibling::span[1]/text()")  # project name
        item['isDivision'] = first(
            "//div[@id='basic-info']//span[contains(text(),'分割')]/following-sibling::span[1]/text()")  # divisible or not
        item['isRegister'] = first(
            "//div[@id='basic-info']//span[contains(text(),'注册')]/following-sibling::span[1]/text()")  # company registration allowed
        item['address'] = first(
            "//span[contains(text(),'地址')]/following-sibling::span[1]/text()")  # project address
        item['cost'] = first(
            "//span[contains(text(),'相关费用')]/following-sibling::span[1]/text()")  # related fees
        item['support'] = many("//ul[@class='peitao-content']/li[not(@class)]/p/text()")  # supporting facilities
        item['houseAdvantages'] = many(
            "//p[contains(text(),'房源亮点')]/following-sibling::article/text()")  # house highlights
        item['projectSupport'] = many(
            "//p[contains(text(),'配套设施')]/following-sibling::article/text()")  # facility description
        item['communityMark'] = many(
            "//p[contains(text(),'项目优势')]/following-sibling::article/text()")  # project advantages
        item['serviceIntroduction'] = many(
            "//p[contains(text(),'服务介绍')]/following-sibling::article/text()")  # service introduction
        item['grossBuildArea'] = first(
            "//span[contains(text(),'总建面')]/following-sibling::span/text()")  # gross building area
        item['occupancyRate'] = first(
            "//span[contains(text(),'得房率')]/following-sibling::span/text()")  # usable-area ratio
        item['completionDate'] = first(
            "//span[contains(text(),'竣工时间')]/following-sibling::span/text()")  # completion date
        item['propertyYears'] = first(
            "//span[contains(text(),'产权年限')]/following-sibling::span/text()")  # property tenure years
        item['propertyCompany'] = first(
            "//span[contains(text(),'物业公司')]/following-sibling::span/text()")  # property management company
        floor_text = first("//span[contains(text(),'楼层')]/following-sibling::span[1]/text()")
        item['totalFloor'] = self.get_total_floor(floor_text)  # total number of floors
        item['lng'], item['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude / latitude
        # TODO: as discussed, project detail pages will be crawled later;
        #  only the project url is saved for now.      --2021/8/10
        yield item

    def parse_ck_li(self, response):
        """
        Parse a warehouse (仓库) case list page.

        Yields one PyppeteerRequest per case towards ``parse_ck_detail`` and,
        when a next-page link exists, a scrapy.Request back to this callback.

        :param response: list page response; meta carries the province/city/
            district names, the current page number and the case category
        """
        meta = response.meta
        province_name = copy(meta['province_name'])
        city_name = copy(meta['city_name'])
        district_name = copy(meta['district_name'])
        page_num = copy(meta['page_num'])
        category = copy(meta['category'])
        case_nodes = response.xpath("//div[@class='list-left']/div[@class='list-item']")
        if not case_nodes:
            logger.warning('{}-{}-{}-第{}页 案例列表提取为空'.format(city_name, district_name, category, page_num))
        for node in case_nodes:
            item = WarehouseItem()
            item['provinceName'] = province_name  # province
            item['cityName'] = city_name  # city
            item['category'] = category  # category
            item['caseUrl'] = node.xpath("./a/@href").extract_first()  # case url
            item['title'] = node.xpath("./a//span[@class='title']/text()").extract_first()  # case title
            item['districtName'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[1]/text()").extract_first()  # district
            item['locationArea'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[2]/text()").extract_first()  # location area
            item['isEIA'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[6]/text()").extract_first()  # EIA approval possible
            item['buildArea'] = node.xpath(
                ".//div[@class='item-area']/p[@class='area']/span/text()").extract()  # building area
            item['caseId'] = self.get_case_id(item['caseUrl'])  # case id
            if not item['caseId']:
                logger.error('{}-{}-{} 案例id、案例url提取出错'.format(city_name, category, item['title']))
                continue
            # Deduplicate by case id via the redis set when CASE_ID_FILTER is
            # on; sadd() returns 0 for an already-seen id.
            if not self.settings['CASE_ID_FILTER'] or self.redis_conn.sadd('anjuke_case_id', item['caseId']):
                yield PyppeteerRequest(
                    item['caseUrl'],
                    callback=self.parse_ck_detail,
                    pretend=True,
                    wait_for='span.content',
                    meta=dict(item_ck=deepcopy(item), ),
                    ignore_resource_types=['image', 'media'],
                )

        # Pagination.
        next_url = response.xpath("//span[contains(text(),'下一页')]/../@href").extract_first()
        next_page = page_num + 1
        if next_url:
            yield scrapy.Request(
                next_url,
                callback=self.parse_ck_li,
                meta=dict(city_name=city_name, province_name=province_name, district_name=district_name,
                          page_num=next_page, category=category,
                          dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-{}-第{}页 提取翻页请求url为空'.format(city_name, district_name, category, next_page))

    def parse_ck_detail(self, response):
        """
        Parse a warehouse (仓库) case detail page.

        :param response: detail page response; ``response.meta['item_ck']``
            carries the item partially filled on the list page
        :return: yields the completed warehouse item
        """
        def first(query):
            # Shorthand: first matching text for an xpath query.
            return response.xpath(query).extract_first()

        def many(query):
            # Shorthand: all matching texts for an xpath query.
            return response.xpath(query).extract()

        item = copy(response.meta['item_ck'])
        url = response.request.url
        if 'ck-zu' in url:  # warehouse for rent
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['dailyRental'] = many("//div[@class='price-one-day']/span/text()")  # daily rental
        elif 'ck-shou' in url:  # warehouse for sale
            item['totalPrice'] = many("//div[@class='price-one-month']/span/text()")  # total price
            item['price'] = many("//div[@class='price-one-day']/span/text()")  # unit price
        elif 'ck-zhuan' in url:  # warehouse for transfer
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['transferFee'] = many(
                "//div[@class='price-one-day']//span[@class='transfer-fee']/text()")  # transfer fee
        else:  # unsupported case type
            logger.warning('{} 暂不支持该类型'.format(url))

        item['listingDate'] = first("//div[@class='time']/text()")  # listing date
        item['warehouseType'] = first("//div[@class='type']/div[@class='value']/text()")  # warehouse type
        item['warehouseStructure'] = first(
            "//span[contains(text(),'仓库结构')]/following-sibling::span[1]/text()")  # warehouse structure
        item['landQuality'] = first(
            "//span[contains(text(),'土地性质')]/following-sibling::span[1]/text()")  # land quality
        item['propertyYears'] = first(
            "//span[contains(text(),'产权年限')]/following-sibling::span[1]/text()")  # property tenure years
        item['remainingLeaseTerm'] = first(
            "//span[contains(text(),'剩余租期')]/following-sibling::span[1]/text()")  # remaining lease term
        if not item['remainingLeaseTerm']:
            # Fallback layout used on some detail pages.
            item['remainingLeaseTerm'] = first("//div[text()='剩余租期']//preceding-sibling::div/text()")
        item['address'] = first(
            "//span[contains(text(),'地址')]/following-sibling::span[1]/text()")  # address
        item['floorType'] = first(
            "//span[contains(text(),'楼层')]/following-sibling::span[1]/text()")  # floor type
        item['firstFloorHeight'] = first(
            "//span[contains(text(),'首层层高')]/following-sibling::span[1]/text()")  # first-floor ceiling height
        item['paymentMethod'] = first(
            "//span[contains(text(),'押付')]/following-sibling::span[1]/text()")  # deposit / payment terms
        item['minimumLeaseTerm'] = first(
            "//span[contains(text(),'起租期')]/following-sibling::span[1]/text()")  # minimum lease term
        item['minimumLeaseArea'] = first(
            "//span[contains(text(),'起租面积')]/following-sibling::span[1]/text()")  # minimum lease area
        item['leaseMethod'] = first(
            "//span[contains(text(),'租赁方式')]/following-sibling::span[1]/text()")  # lease method
        item['entryCondition'] = first(
            "//span[contains(text(),'进车情况')]/following-sibling::span[1]/text()")  # truck access conditions
        item['supplyVoltage'] = first(
            "//span[contains(text(),'供电电压')]/following-sibling::span[1]/text()")  # supply voltage
        item['flatMaterial'] = first(
            "//span[contains(text(),'地平材质')]/following-sibling::span[1]/text()")  # floor material
        item['flatBearing'] = first(
            "//span[contains(text(),'地平承重')]/following-sibling::span[1]/text()")  # floor load capacity
        item['fireGrade'] = first(
            "//span[contains(text(),'消防等级')]/following-sibling::span[1]/text()")  # fire protection grade
        item['isBonded'] = first(
            "//span[contains(text(),'保税仓')]/following-sibling::span[1]/text()")  # bonded warehouse
        item['unloadingMethod'] = first(
            "//span[contains(text(),'卸货')]/following-sibling::span[1]/text()")  # unloading method
        item['unloadingEquipment'] = first(
            "//span[contains(text(),'卸货设备')]/following-sibling::span[1]/text()")  # unloading equipment
        item['warehouseAdvantages'] = first(
            "//span[contains(text(),'仓库特色')]/following-sibling::span[1]/text()")  # warehouse highlights
        item['support'] = many("//ul[@class='peitao-content']/li[not(@class)]/p/text()")  # supporting facilities
        item['houseAdvantages'] = many(
            "//p[contains(text(),'房源亮点')]/following-sibling::article/text()")  # house highlights
        item['lng'], item['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude / latitude
        yield item

    def parse_cf_li(self, response):
        """
        Parse a workshop/factory (厂房) case list page.

        Yields one PyppeteerRequest per case towards ``parse_cf_detail`` and,
        when a next-page link exists, a scrapy.Request back to this callback.

        :param response: list page response; meta carries the province/city/
            district names, the current page number and the case category
        """
        meta = response.meta
        province_name = copy(meta['province_name'])
        city_name = copy(meta['city_name'])
        district_name = copy(meta['district_name'])
        page_num = copy(meta['page_num'])
        category = copy(meta['category'])
        case_nodes = response.xpath("//div[@class='list-left']/div[@class='list-item']")
        if not case_nodes:
            logger.warning('{}-{}-{}-第{}页 案例列表提取为空'.format(city_name, district_name, category, page_num))
        for node in case_nodes:
            item = WorkshopItem()
            item['provinceName'] = province_name  # province
            item['cityName'] = city_name  # city
            item['category'] = category  # category
            item['caseUrl'] = node.xpath("./a/@href").extract_first()  # case url
            item['title'] = node.xpath("./a//span[@class='title']/text()").extract_first()  # case title
            item['districtName'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[1]/text()").extract_first()  # district
            item['locationArea'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[2]/text()").extract_first()  # location area
            item['isEIA'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[6]/text()").extract_first()  # EIA approval possible
            item['buildArea'] = node.xpath(
                ".//div[@class='item-area']/p[@class='area']/span/text()").extract()  # building area
            item['caseId'] = self.get_case_id(item['caseUrl'])  # case id
            if not item['caseId']:
                logger.error('{}-{}-{} 案例id、案例url提取出错'.format(city_name, category, item['title']))
                continue
            # Deduplicate by case id via the redis set when CASE_ID_FILTER is
            # on; sadd() returns 0 for an already-seen id.
            if not self.settings['CASE_ID_FILTER'] or self.redis_conn.sadd('anjuke_case_id', item['caseId']):
                yield PyppeteerRequest(
                    item['caseUrl'],
                    callback=self.parse_cf_detail,
                    pretend=True,
                    wait_for='span.content',
                    meta=dict(item_cf=deepcopy(item), ),
                    ignore_resource_types=['image', 'media'],
                )

        # Pagination.
        next_url = response.xpath("//span[contains(text(),'下一页')]/../@href").extract_first()
        next_page = page_num + 1
        if next_url:
            yield scrapy.Request(
                next_url,
                callback=self.parse_cf_li,
                meta=dict(city_name=city_name, province_name=province_name, district_name=district_name,
                          page_num=next_page, category=category,
                          dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-{}-第{}页 提取翻页请求url为空'.format(city_name, district_name, category, next_page))

    def parse_cf_detail(self, response):
        """
        Parse a workshop/factory (厂房) case detail page.

        :param response: detail page response; ``response.meta['item_cf']``
            carries the item partially filled on the list page
        :return: yields the completed workshop item
        """
        def first(query):
            # Shorthand: first matching text for an xpath query.
            return response.xpath(query).extract_first()

        def many(query):
            # Shorthand: all matching texts for an xpath query.
            return response.xpath(query).extract()

        item = copy(response.meta['item_cf'])
        url = response.request.url
        if 'cf-zu' in url:  # workshop for rent
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['dailyRental'] = many("//div[@class='price-one-day']/span/text()")  # daily rental
        elif 'cf-shou' in url:  # workshop for sale
            item['totalPrice'] = many("//div[@class='price-one-month']/span/text()")  # total price
            item['price'] = many("//div[@class='price-one-day']/span/text()")  # unit price
        elif 'cf-zhuan' in url:  # workshop for transfer
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['transferFee'] = many(
                "//div[@class='price-one-day']//span[@class='transfer-fee']/text()")  # transfer fee
        else:  # unsupported case type
            logger.warning('{} 暂不支持该类型'.format(url))

        item['listingDate'] = first("//div[@class='time']/text()")  # listing date
        item['factoryType'] = first("//div[@class='type']/div[@class='value']/text()")  # factory type
        item['paymentMethod'] = first(
            "//span[contains(text(),'押付')]/following-sibling::span[1]/text()")  # deposit / payment terms
        item['minimumLeaseTerm'] = first(
            "//span[contains(text(),'起租期')]/following-sibling::span[1]/text()")  # minimum lease term
        item['leaseMethod'] = first(
            "//span[contains(text(),'租赁方式')]/following-sibling::span[1]/text()")  # lease method
        item['address'] = first(
            "//span[contains(text(),'地址')]/following-sibling::span[1]/text()")  # address
        item['landQuality'] = first(
            "//span[contains(text(),'土地性质')]/following-sibling::span[1]/text()")  # land quality
        item['propertyYears'] = first(
            "//span[contains(text(),'产权年限')]/following-sibling::span[1]/text()")  # property tenure years
        item['remainingLeaseTerm'] = first(
            "//span[contains(text(),'剩余租期')]/following-sibling::span[1]/text()")  # remaining lease term
        if not item['remainingLeaseTerm']:
            # Fallback layout used on some detail pages.
            item['remainingLeaseTerm'] = first("//div[text()='剩余租期']//preceding-sibling::div/text()")
        item['floorType'] = first(
            "//span[contains(text(),'楼层')]/following-sibling::span[1]/text()")  # floor type
        item['firstFloorHeight'] = first(
            "//span[contains(text(),'首层层高')]/following-sibling::span[1]/text()")  # first-floor ceiling height
        item['factoryNew'] = first(
            "//span[contains(text(),'厂房新旧')]/following-sibling::span[1]/text()")  # factory condition (new/old)
        item['factoryStructure'] = first(
            "//span[contains(text(),'厂房结构')]/following-sibling::span[1]/text()")  # factory structure
        item['floorBearing'] = first(
            "//span[contains(text(),'楼板承重')]/following-sibling::span[1]/text()")  # floor slab load capacity
        item['supplyVoltage'] = first(
            "//span[contains(text(),'供电电压')]/following-sibling::span[1]/text()")  # supply voltage
        item['fireRecord'] = first(
            "//span[contains(text(),'消防备案')]/following-sibling::span[1]/text()")  # fire safety registration
        item['factoryAdvantages'] = first(
            "//span[contains(text(),'厂房特色')]/following-sibling::span[1]/text()")  # factory highlights
        item['support'] = many("//ul[@class='peitao-content']/li[not(@class)]/p/text()")  # supporting facilities
        item['houseAdvantages'] = many(
            "//p[contains(text(),'房源亮点')]/following-sibling::article/text()")  # house highlights
        item['lng'], item['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude / latitude
        yield item

    def parse_td_li(self, response):
        """
        Parse a land (土地) case list page.

        Yields one PyppeteerRequest per case towards ``parse_td_detail`` and,
        when a next-page link exists, a scrapy.Request back to this callback.

        :param response: list page response; meta carries the province/city
            names, the current page number and the case category
        """
        meta = response.meta
        province_name = copy(meta['province_name'])
        city_name = copy(meta['city_name'])
        page_num = copy(meta['page_num'])
        category = copy(meta['category'])
        case_nodes = response.xpath("//div[@class='list-left']/div[@class='list-item']")
        if not case_nodes:
            logger.warning('{}-{}-第{}页 案例列表提取为空'.format(city_name, category, page_num))
        for node in case_nodes:
            item = LandItem()
            item['provinceName'] = province_name  # province
            item['cityName'] = city_name  # city
            item['category'] = category  # category
            item['caseUrl'] = node.xpath("./a/@href").extract_first()  # case url
            item['title'] = node.xpath("./a//span[@class='title']/text()").extract_first()  # case title
            item['districtName'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[1]/text()").extract_first()  # district
            item['locationArea'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[2]/text()").extract_first()  # location area
            item['buildArea'] = node.xpath(
                ".//div[@class='item-area']/p[@class='area']/span/text()").extract()  # building area
            item['caseId'] = self.get_case_id(item['caseUrl'])  # case id
            if not item['caseId']:
                logger.error('{}-{}-{} 案例id、案例url提取出错'.format(city_name, category, item['title']))
                continue
            # Deduplicate by case id via the redis set when CASE_ID_FILTER is
            # on; sadd() returns 0 for an already-seen id.
            if not self.settings['CASE_ID_FILTER'] or self.redis_conn.sadd('anjuke_case_id', item['caseId']):
                yield PyppeteerRequest(
                    item['caseUrl'],
                    callback=self.parse_td_detail,
                    pretend=True,
                    wait_for='span.content',
                    meta=dict(item_td=deepcopy(item), ),
                    ignore_resource_types=['image', 'media'],
                )

        # Pagination.
        next_url = response.xpath("//span[contains(text(),'下一页')]/../@href").extract_first()
        next_page = page_num + 1
        if next_url:
            yield scrapy.Request(
                next_url,
                callback=self.parse_td_li,
                meta=dict(city_name=city_name, province_name=province_name,
                          page_num=next_page, category=category,
                          dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-第{}页 提取翻页请求url为空'.format(city_name, category, next_page))

    def parse_td_detail(self, response):
        """
        Parse a land (土地) case detail page.

        :param response: detail page response; ``response.meta['item_td']``
            carries the item partially filled on the list page
        :return: yields the completed land item
        """
        def first(query):
            # Shorthand: first matching text for an xpath query.
            return response.xpath(query).extract_first()

        def many(query):
            # Shorthand: all matching texts for an xpath query.
            return response.xpath(query).extract()

        item = copy(response.meta['item_td'])
        url = response.request.url
        if 'td-zu' in url:  # land for rent
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['dailyRental'] = many("//div[@class='price-one-day']/span/text()")  # daily rental
        elif 'td-shou' in url:  # land for sale
            item['totalPrice'] = many("//div[@class='price-one-month']/span/text()")  # total price
            item['price'] = many("//div[@class='price-one-day']/span/text()")  # unit price
        elif 'td-zhuan' in url:  # land for transfer
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['transferFee'] = many(
                "//div[@class='price-one-day']//span[@class='transfer-fee']/text()")  # transfer fee
        else:  # unsupported case type
            logger.warning('{} 暂不支持该类型'.format(url))

        item['listingDate'] = first("//div[@class='time']/text()")  # listing date
        item['planningUse'] = first("//div[@class='type']/div[@class='value']/text()")  # planned use
        item['downpayment'] = first("//div[text()='首付']/preceding-sibling::div/text()")  # down payment
        item['paymentMethod'] = first(
            "//div[@id='basic-info']//span[contains(text(),'押付')]/following-sibling::span[1]/text()")  # deposit / payment terms
        item['minimumLeaseTerm'] = first(
            "//div[@id='basic-info']//span[contains(text(),'起租期')]/following-sibling::span[1]/text()")  # minimum lease term
        item['leaseMethod'] = first(
            "//span[contains(text(),'租赁方式')]/following-sibling::span[1]/text()")  # lease method
        item['address'] = first(
            "//div[@id='basic-info']//span[contains(text(),'地址')]/following-sibling::span[1]/text()")  # address
        item['landOwnership'] = first(
            "//span[contains(text(),'所有权')]/following-sibling::span[1]/text()")  # land ownership
        item['remainingLeaseTerm'] = first(
            "//span[contains(text(),'剩余租期')]/following-sibling::span[1]/text()")  # remaining lease term
        if not item['remainingLeaseTerm']:
            # Fallback layout used on some detail pages.
            item['remainingLeaseTerm'] = first("//div[text()='剩余租期']//preceding-sibling::div/text()")
        item['houseAdvantages'] = many(
            "//p[contains(text(),'房源亮点')]/following-sibling::article/text()")  # house highlights
        item['lng'], item['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude / latitude
        yield item

    def parse_cw_li(self, response):
        """
        Parse a parking-space (车位) case list page.

        Yields one PyppeteerRequest per case towards ``parse_cw_detail`` and,
        when a next-page link exists, a scrapy.Request back to this callback.

        :param response: list page response; meta carries the province/city
            names, the current page number and the case category
        """
        meta = response.meta
        province_name = copy(meta['province_name'])
        city_name = copy(meta['city_name'])
        page_num = copy(meta['page_num'])
        category = copy(meta['category'])
        case_nodes = response.xpath("//div[@class='list-left']/div[@class='list-item']")
        if not case_nodes:
            logger.warning('{}-{}-第{}页 案例列表提取为空'.format(city_name, category, page_num))
        for node in case_nodes:
            item = ParkingItem()
            item['provinceName'] = province_name  # province
            item['cityName'] = city_name  # city
            item['category'] = category  # category
            item['caseUrl'] = node.xpath("./a/@href").extract_first()  # case url
            item['title'] = node.xpath("./a//span[@class='title']/text()").extract_first()  # case title
            item['districtName'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[1]/text()").extract_first()  # district
            item['locationArea'] = node.xpath(
                ".//div[@class='item-info']/p[1]/span[2]/text()").extract_first()  # location area
            item['buildArea'] = node.xpath(
                ".//div[@class='item-area']/p[@class='area']/span/text()").extract()  # building area
            item['caseId'] = self.get_case_id(item['caseUrl'])  # case id
            if not item['caseId']:
                logger.error('{}-{}-{} 案例id、案例url提取出错'.format(city_name, category, item['title']))
                continue
            # Deduplicate by case id via the redis set when CASE_ID_FILTER is
            # on; sadd() returns 0 for an already-seen id.
            if not self.settings['CASE_ID_FILTER'] or self.redis_conn.sadd('anjuke_case_id', item['caseId']):
                yield PyppeteerRequest(
                    item['caseUrl'],
                    callback=self.parse_cw_detail,
                    pretend=True,
                    wait_for='span.content',
                    meta=dict(item_cw=deepcopy(item), ),
                    ignore_resource_types=['image', 'media'],
                )

        # Pagination.
        next_url = response.xpath("//span[contains(text(),'下一页')]/../@href").extract_first()
        next_page = page_num + 1
        if next_url:
            yield scrapy.Request(
                next_url,
                callback=self.parse_cw_li,
                meta=dict(city_name=city_name, province_name=province_name,
                          page_num=next_page, category=category,
                          dont_redirect=True, handle_httpstatus_list=[301, 302, 307], ),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-第{}页 提取翻页请求url为空'.format(city_name, category, next_page))

    def parse_cw_detail(self, response):
        """
        Parse a parking-space (车位) case detail page.

        :param response: detail page response; ``response.meta['item_cw']``
            carries the item partially filled on the list page
        :return: yields the completed parking item
        """
        def first(query):
            # Shorthand: first matching text for an xpath query.
            return response.xpath(query).extract_first()

        def many(query):
            # Shorthand: all matching texts for an xpath query.
            return response.xpath(query).extract()

        item = copy(response.meta['item_cw'])
        url = response.request.url
        if 'cw-zu' in url:  # parking space for rent
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['dailyRental'] = many("//div[@class='price-one-day']/span/text()")  # daily rental
        elif 'cw-shou' in url:  # parking space for sale
            item['totalPrice'] = many("//div[@class='price-one-month']/span/text()")  # total price
            item['price'] = many("//div[@class='price-one-day']/span/text()")  # unit price
        elif 'cw-zhuan' in url:  # parking space for transfer
            item['monthlyRental'] = many("//div[@class='price-one-month']/span/text()")  # monthly rental
            item['transferFee'] = many(
                "//div[@class='price-one-day']//span[@class='transfer-fee']/text()")  # transfer fee
        else:  # unsupported case type
            logger.warning('{} 暂不支持该类型'.format(url))

        item['listingDate'] = first("//div[@class='time']/text()")  # listing date
        item['downpayment'] = first("//div[text()='首付']/preceding-sibling::div/text()")  # down payment
        item['paymentMethod'] = first(
            "//div[@id='basic-info']//span[contains(text(),'押付')]/following-sibling::span[1]/text()")  # deposit / payment terms
        item['minimumLeaseTerm'] = first(
            "//div[@id='basic-info']//span[contains(text(),'起租期')]/following-sibling::span[1]/text()")  # minimum lease term
        item['address'] = first(
            "//div[@id='basic-info']//span[contains(text(),'地址')]/following-sibling::span[1]/text()")  # address
        item['remainingLeaseTerm'] = first(
            "//span[contains(text(),'剩余租期')]/following-sibling::span[1]/text()")  # remaining lease term
        if not item['remainingLeaseTerm']:
            # Fallback layout used on some detail pages.
            item['remainingLeaseTerm'] = first("//div[text()='剩余租期']//preceding-sibling::div/text()")
        item['houseAdvantages'] = many(
            "//p[contains(text(),'房源亮点')]/following-sibling::article/text()")  # house highlights
        item['lng'], item['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude / latitude
        yield item

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        获取配置参数
        :return: (['重庆', '成都', '武汉'], 1)
        """
        param_1 = getattr(self, 'CRAWL_CITIES', None)
        param_2 = getattr(self, 'FILTER_CITIES', None)
        param_3 = self.settings.get('CRAWL_CITIES', None)
        param_4 = self.settings.get('FILTER_CITIES', None)
        if param_1 or param_2:
            if param_1:
                return self.check_params(param_1), 1  # 1代表配置参数是爬取列表
            else:
                return self.check_params(param_2), 2  # 2代表配置参数是过滤列表
        elif param_3 or param_4:
            if param_3:
                return self.check_params(param_3), 1
            else:
                return self.check_params(param_4), 2
        else:
            return ['全国'], 1

    @staticmethod
    def check_params(params) -> list:
        """
        爬取城市参数校验和处理
        :param params:
        :return: ['重庆', '成都', '武汉']
        """
        regex_1 = re.compile('，|、|；|;')
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        if isinstance(params, str):
            try:
                params_li = regex_1.sub(',', params).split(',')
                ret_li = [param.strip() for param in params_li if
                          isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                assert ret_li and len(ret_li) == len(params_li)
            except:
                logger.error('参数输入错误，请重新输入，例如： scrapy crawl anjuke -a crawl=重庆，北京')
                raise AssertionError(f'错误参数>>>>>>{params}')
            else:
                return ret_li
        elif isinstance(params, list):
            try:
                ret_li = [param.strip() for param in params if
                          isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                assert ret_li and len(ret_li) == len(params)
            except:
                logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                raise AssertionError(f'错误参数>>>>>>{params}')
            else:
                return ret_li
        else:
            raise AssertionError(f'错误参数>>>>>>{params}')

    @staticmethod
    def format_crawl_city_dict(crawl_city_dict):
        """
        城市非住宅url标准化处理
        :param crawl_city_dict:
        :return:
        """
        try:
            sp_zu_url = crawl_city_dict['sp_zu']
            assert sp_zu_url
            base_url = sp_zu_url.split('com/')[0] + 'com/'
            cf_zu_url = crawl_city_dict.get('cf_zu', None)
        except:
            return
        else:
            crawl_city_dict['ck_zhuan'] = base_url + 'ck-zhuan/'
            crawl_city_dict['cf_zhuan'] = base_url + 'cf-zhuan/'
            crawl_city_dict['td_zhuan'] = base_url + 'td-zhuan/'
            crawl_city_dict['cw_zhuan'] = base_url + 'cw-zhuan/'
            if (not cf_zu_url) or (not isinstance(cf_zu_url, str)):
                crawl_city_dict['cf_zu'] = base_url + 'cf-zu/'
                crawl_city_dict['cf_shou'] = base_url + 'cf-shou/'
                crawl_city_dict['ck_zu'] = base_url + 'ck-zu/'
                crawl_city_dict['ck_shou'] = base_url + 'ck-shou/'
                crawl_city_dict['td_zu'] = base_url + 'td-zu/'
                crawl_city_dict['td_shou'] = base_url + 'td-shou/'
                crawl_city_dict['cw_zu'] = base_url + 'cw-zu/'
                crawl_city_dict['cw_shou'] = base_url + 'cw-shou/'
                return crawl_city_dict
            else:
                return crawl_city_dict

    @staticmethod
    def get_case_id(case_url):
        """
        提取案例id
        :param case_url:
        :return:
        """
        regex = re.compile(r'houseid=(.*?)&')
        try:
            case_id = regex.findall(case_url)[0]
            assert case_id
        except:
            return
        else:
            return case_id

    @staticmethod
    def get_width_and_depth_and_floor_height(specs_str) -> tuple:
        """
        提取规格（面宽、进深、层高）
        :param specs_str:
        :return:
        """
        regex_width = re.compile(r'面宽(\d+?)')
        regex_depth = re.compile(r'进深(\d+?)')
        regex_floor_height = re.compile(r'层高(\d+?)')
        width = regex_width.findall(specs_str)[0] if specs_str and len(regex_width.findall(specs_str)) else specs_str
        depth = regex_depth.findall(specs_str)[0] if specs_str and len(regex_depth.findall(specs_str)) else specs_str
        floor_height = regex_floor_height.findall(specs_str)[0] if specs_str and len(
            regex_floor_height.findall(specs_str)) else specs_str
        return width, depth, floor_height

    @staticmethod
    def get_total_floor(floor_str):
        """
        提取总楼层
        :param floor_str:
        :return:
        """
        try:
            ret_li = floor_str.split('/')
            assert len(ret_li) == 2
            total_floor_str = ret_li[1]
        except:
            return floor_str
        else:
            return total_floor_str

    @staticmethod
    def get_lng_and_lat(resp_str) -> tuple:
        """
        提取经度和纬度
        :param resp_str:
        :return:
        """
        lng_regex = re.compile(r'lng:.*?(\d+\.\d+?),')
        lat_regex = re.compile(r'lat:.*?(\d+\.\d+?),')
        lng = lng_regex.findall(resp_str)[0] if resp_str and len(lng_regex.findall(resp_str)) == 1 else None
        lat = lat_regex.findall(resp_str)[0] if resp_str and len(lat_regex.findall(resp_str)) == 1 else None
        return lng, lat

    @staticmethod
    def is_street(property_str, other_list):
        """
        判断是否临街
        :param property_str: 物业类型
        :param other_list: 其它
        :return:
        """
        try:
            assert property_str or other_list
            if isinstance(other_list, list) and '临街' in other_list:
                ret = True
            elif isinstance(property_str, str) and '临街' in property_str:
                ret = True
            else:
                ret = False
        except:
            return
        else:
            return ret
