import scrapy

import logging
import pandas as pd
import re
from HifoFzz.utils.office86_cities import OFFICE86_CITY_LIST
from copy import copy, deepcopy
from HifoFzz.items import OfficeItem

logger = logging.getLogger(__name__)


class Office86Spider(scrapy.Spider):
    """Spider for 86office.com office-building (写字楼) rent/sale listings.

    Flow: ``parse`` validates the crawl/filter city configuration, then for
    every matched city two list-page requests are issued (rent + sale).
    Each list page yields one detail request per case, and
    ``parse_xzl_detail`` emits a populated :class:`OfficeItem`.

    NOTE(review): ``self.filter_flag`` and ``self.redis_conn`` are not set in
    this class — presumably attached by a pipeline/extension (e.g.
    RedisConnPipeline); confirm before running standalone.
    """
    name = 'office86'
    allowed_domains = ['86office.com']
    start_urls = ['http://cq.86office.com/office/']

    custom_settings = {
        'CONCURRENT_REQUESTS': 6,
        'DOWNLOADER_MIDDLEWARES': {
            # Replace the stock retry middleware with the project one.
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoFzz.middlewares.CustomRetryMiddleware': 500,
            'HifoFzz.middlewares.UserAgentMiddleware': 544,
            # Proxy pool stored in redis; requires RedisConnPipeline as well.
            'HifoFzz.middlewares.OuterNetProxyMiddleware': 545,
        },
        'COOKIES_ENABLED': False,  # do not carry cookies between requests
        'CASE_ID_FILTER': True,  # de-duplicate cases by id via a redis set
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """Accept city lists from the CLI: ``-a crawl=...`` / ``-a filter=...``.

        ``filter`` shadows the builtin, but the name is part of the public
        command-line interface and therefore kept.
        """
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """Validate configuration, then fan out one request pair per city.

        :param response: response of the start url (its content is unused).
        """
        try:
            # Dedup readiness check. ``== False`` is intentional so that 0
            # also counts as "dedup setup failed"; a missing attribute raises
            # AttributeError and falls into the generic handler below.
            if self.filter_flag == False:  # noqa: E712
                raise AssertionError(f'>>>>{self.name}:案例id去重配置失败<<<<')
            # Resolve the configured city list and whether it is a crawl
            # list (1) or a filter list (2).
            cities_li, list_type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # Dedup check failed: stop the spider.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Parameter validation failed: stop the spider.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            if not cities_li:
                logger.error('86写字楼城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '86写字楼城市url列表匹配为空')
                return
            cities_df = pd.DataFrame(OFFICE86_CITY_LIST)
            if list_type == 1:
                # Crawl list: '全国' means every known city.
                if '全国' in cities_li:
                    crawl_city_li = cities_df.to_dict(orient='records')
                else:
                    crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                    # Warn about requested cities with no office url entry.
                    matched_cities = {d['city_name'] for d in crawl_city_li}
                    for city_name in cities_li:
                        if city_name not in matched_cities:
                            logger.warning('{} 写字楼url为空'.format(city_name))
            else:
                # Filter list: '全国' would exclude everything — abort.
                if '全国' in cities_li:
                    crawl_city_li = list()
                    self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                else:
                    crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')

            # Build the per-city office requests.
            yield from self.create_city_request(crawl_city_li)
            del cities_df

    def create_city_request(self, crawl_city_li):
        """Yield the first list-page request pair (rent + sale) per city.

        :param crawl_city_li: city dicts carrying url-building info.
        """
        if not crawl_city_li:
            self.crawler.engine.close_spider(self, '86写字楼城市爬取列表为空')
            return
        page_num = 1
        # First list page carries no ``pagenow`` segment in the url.
        page_param = '_orderby-saupdatedt_isasc-1.htm'
        for crawl_city_dict in crawl_city_li:
            # Normalize the city office urls; skip cities without a base url.
            crawl_city_dict = self.format_crawl_city_dict(crawl_city_dict)
            if not crawl_city_dict:
                continue
            headers = {
                'Referer': crawl_city_dict['city_url'],
            }
            # One request for rentals, one for sales.
            for url_key, category in (('xzl_zu', '写字楼出租'), ('xzl_shou', '写字楼出售')):
                yield scrapy.Request(
                    crawl_city_dict[url_key].format(page_param),
                    headers=headers,
                    callback=self.parse_xzl_list,
                    meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), page_num=page_num,
                              category=category),
                    dont_filter=True,
                )

    def parse_xzl_list(self, response):
        """Parse one office list page: schedule detail requests and paging.

        :param response: a rent/sale list page.
        """
        crawl_city_dict = response.meta['crawl_city_dict']
        page_num = response.meta['page_num']
        category = response.meta['category']
        case_obj_li = response.xpath("//div[@class='Zloupan2']/ul/li")
        if case_obj_li:
            for case_obj in case_obj_li:
                item_xzl = OfficeItem()
                item_xzl['provinceName'] = crawl_city_dict['province_name']  # province
                item_xzl['cityName'] = crawl_city_dict['city_name']  # city
                item_xzl['category'] = category  # listing category (rent/sale)
                item_xzl['caseUrl'] = case_obj.xpath("./div[@class='news_lpfont']/div/a/@href").extract_first()  # case url
                item_xzl['caseId'] = self.get_case_id(item_xzl['caseUrl'])  # case id
                item_xzl['title'] = case_obj.xpath("./div[@class='news_lpfont']/div/a/text()").extract_first()  # case title
                item_xzl['name'] = case_obj.xpath("./div[@class='news_lpfont']/div/a/@title").extract_first()  # building name
                item_xzl['districtName'] = case_obj.xpath(
                    ".//div[@class='ftlt_htfont']/div[1]//span/text()").extract_first()  # district
                item_xzl['shangQuan'] = case_obj.xpath(
                    ".//div[@class='ftlt_htfont']/div[2]//span/text()").extract_first()  # business area
                item_xzl['officeType'] = case_obj.xpath(
                    ".//div[@class='ftlt_htfont']/div[3]//span/text()").extract_first()  # office type
                item_xzl['buildArea'] = case_obj.xpath(
                    ".//div[@class='news_ment']/div[1]//text()").extract()  # floor area
                if category == '写字楼出租':
                    item_xzl['dailyRental'] = case_obj.xpath(
                        ".//div[@class='news_ment']/div[2]//text()").extract()  # daily rent
                else:
                    item_xzl['price'] = case_obj.xpath(
                        ".//div[@class='news_ment']/div[2]//text()").extract()  # unit price
                if item_xzl['caseUrl'] and item_xzl['caseId']:
                    # Dedup by case id: redis SADD returns 0 when the id was
                    # already seen. Equivalent to the short-circuit
                    # (FILTER and sadd) or (not FILTER) form, including the
                    # side effect of only calling sadd when filtering is on.
                    if (not self.settings['CASE_ID_FILTER']
                            or self.redis_conn.sadd('office86_case_id', item_xzl['caseId'])):
                        yield response.follow(
                            item_xzl['caseUrl'],
                            callback=self.parse_xzl_detail,
                            meta=dict(item_xzl=deepcopy(item_xzl)),
                            dont_filter=False,
                        )
                else:
                    logger.error('{}-{}-第{}页-{} 案例url提取为空'.format(crawl_city_dict['city_name'], category, page_num,
                                                                  item_xzl['name']))
        else:
            logger.warning('{}-{}-第{}页 案例列表为空'.format(crawl_city_dict['city_name'], category, page_num, ))

        # Pagination: follow the "next page" link when present.
        next_page_url = response.xpath("//a[contains(text(),'下一页')]/@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield response.follow(
                next_page_url,
                callback=self.parse_xzl_list,
                meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), page_num=next_page_num,
                          category=category),
                dont_filter=True,
            )
        else:
            logger.warning('{}-{}-第{}页 案例列表翻页请求url为空'.format(crawl_city_dict['city_name'], category, next_page_num, ))

    def parse_xzl_detail(self, response):
        """Parse a case detail page and yield the completed item.

        :param response: a single case detail page.
        """
        item_xzl = response.meta['item_xzl']
        item_xzl['caseUrl'] = response.request.url  # final case url (after redirects)
        item_xzl['listingDate'] = response.xpath("//div[@class='m_floatright']/div[1]/text()").extract_first()  # publish date
        item_xzl['propertyFee'] = response.xpath(
            "//div[@class='m_cherimg']//span[contains(text(),'物管费')]/../text()").extract_first()  # property management fee
        item_xzl['caseGrade'] = response.xpath(
            "//div[@class='m_cherimg']//span[contains(text(),'级别')]/../text()").extract_first()  # case grade
        item_xzl['decorationState'] = response.xpath(
            "//div[@class='m_cherimg']//span[contains(text(),'装修')]/../text()").extract_first()  # decoration state
        item_xzl['paymentMethod'] = response.xpath(
            "//div[@class='m_cherimg']//span[contains(text(),'付款')]/../text()").extract_first()  # deposit/payment terms
        item_xzl['address'] = response.xpath(
            "//div[@class='m_cherimg']//span[contains(text(),'地址')]/../text()").extract_first()  # address
        item_xzl['floorType'] = response.xpath(
            "//div[@class='m_cherimg']//span[contains(text(),'楼层')]/../text()").extract_first()  # floor type
        item_xzl['projectBrief'] = response.xpath(
            "//div[contains(text(),'房源描述')]/following-sibling::div[1]//text()").extract()  # listing description
        item_xzl['officeNature'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'类型')]/../text()").extract_first()  # office nature
        item_xzl['entryDate'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'入住时间')]/../text()").extract_first()  # move-in date
        item_xzl['officeType'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'级别')]/../text()").extract_first()  # office type
        item_xzl['elevator'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'梯')]/..//text()").extract()  # elevators
        item_xzl['totalFloor'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'总楼层')]/../text()").extract_first()  # total floors
        item_xzl['floorHeight'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'高')]/../text()").extract_first()  # floor height
        item_xzl['netFloorHeight'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'净层高')]/../text()").extract_first()  # net floor height
        item_xzl['layerArea'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'单层面积')]/../text()").extract_first()  # per-floor area
        item_xzl['airConditioner'] = response.xpath(
            "//div[@class='m_fximg']//span[contains(text(),'空调')]/../text()").extract_first()  # air conditioning
        item_xzl['lng'], item_xzl['lat'] = self.get_lng_and_lat(response.body.decode())  # longitude, latitude
        if item_xzl['category'] == '写字楼出租':
            item_xzl['monthlyRental'] = response.xpath(
                "//span[contains(text(),'月租金')]/following-sibling::span/text()").extract_first()  # monthly rent
        else:
            item_xzl['totalPrice'] = response.xpath(
                "//span[contains(text(),'总价')]/following-sibling::span/text()").extract_first()  # total price
        yield item_xzl

    @staticmethod
    def format_crawl_city_dict(crawl_city_dict):
        """Attach the office url templates to a city dict.

        :param crawl_city_dict: city dict; must carry a non-empty 'city_url'.
        :return: the dict with url templates added, or None when no base url.
        """
        city_url = crawl_city_dict.get('city_url')
        if not city_url:
            return None
        crawl_city_dict['xzl_url'] = city_url + 'office/{}.htm'
        crawl_city_dict['xzl_zu'] = city_url + 'rent/{}'
        crawl_city_dict['xzl_shou'] = city_url + 'sale/{}'
        return crawl_city_dict

    def get_crawl_or_filter_cities(self) -> tuple:
        """Resolve the configured city list.

        Priority: command-line ``-a`` arguments, then project settings,
        then a default of the whole country.

        :return: (city list, 1 for crawl-list / 2 for filter-list),
                 e.g. (['重庆', '成都', '武汉'], 1)
        """
        cli_crawl = getattr(self, 'CRAWL_CITIES', None)
        cli_filter = getattr(self, 'FILTER_CITIES', None)
        cfg_crawl = self.settings.get('CRAWL_CITIES', None)
        cfg_filter = self.settings.get('FILTER_CITIES', None)
        if cli_crawl:
            return self.check_params(cli_crawl), 1  # 1 = crawl list
        if cli_filter:
            return self.check_params(cli_filter), 2  # 2 = filter list
        if cfg_crawl:
            return self.check_params(cfg_crawl), 1
        if cfg_filter:
            return self.check_params(cfg_filter), 2
        return ['全国'], 1

    @staticmethod
    def check_params(params) -> list:
        """Validate and normalize the city parameter.

        :param params: a string with cities separated by ，、；; or , —
                       or a list of city-name strings.
        :return: cleaned city names, e.g. ['重庆', '成都', '武汉']
        :raises AssertionError: when any entry is empty or not pure CJK.
        """
        sep_regex = re.compile('，|、|；|;')
        cjk_regex = re.compile(r'^[\u4E00-\u9FFF]+$')

        def _clean(raw_li):
            # Keep only non-empty, fully-CJK string entries.
            return [p.strip() for p in raw_li
                    if isinstance(p, str) and p.strip() and cjk_regex.findall(p.strip())]

        if isinstance(params, str):
            params_li = sep_regex.sub(',', params).split(',')
            ret_li = _clean(params_li)
            # Every raw entry must survive cleaning, otherwise reject all.
            if not ret_li or len(ret_li) != len(params_li):
                logger.error('参数输入错误，请重新输入，例如： scrapy crawl office86 -a crawl=重庆，北京')
                raise AssertionError(f'错误参数>>>>>>{params}')
            return ret_li
        if isinstance(params, list):
            ret_li = _clean(params)
            if not ret_li or len(ret_li) != len(params):
                logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                raise AssertionError(f'错误参数>>>>>>{params}')
            return ret_li
        raise AssertionError(f'错误参数>>>>>>{params}')

    @staticmethod
    def get_case_id(case_url):
        """Extract the case id (the filename stem) from a case url.

        :param case_url: e.g. '/office/abc123.htm' (may be None).
        :return: the id string, or None when absent/unmatched.
        """
        try:
            ret = re.findall(r'/.*/(.*?)\.htm', case_url)[0]
        except (TypeError, IndexError):
            # case_url is None, or the pattern did not match.
            return None
        return ret or None

    @staticmethod
    def get_lng_and_lat(resp_str):
        """Pull the 'lng|lat' pair out of the page body.

        :param resp_str: decoded page body.
        :return: (lng, lat) strings, or (None, None) when not found.
        """
        try:
            lng, lat = re.findall(r'(\d{3}\.\d+\|\d{2}\.\d+)', resp_str)[0].split('|')
        except (TypeError, IndexError, ValueError):
            return None, None
        return lng, lat
