# -*- coding: utf-8 -*-
import scrapy
import logging
import re
import pandas as pd
import random
from HifoEsf.utils.beike_cities import BEIKE_CITY_LIST
from copy import deepcopy, copy
import json
from HifoEsf.items import BuildingUnitItem

logger = logging.getLogger(__name__)


class BeikeBuildingSpider(scrapy.Spider):
    """Spider for Beike (ke.com) building / unit / room-number data.

    Request pipeline:
        city list -> community list (read from MongoDB through ``self.clo``)
        -> buildings per community -> units per building -> floors/rooms
        per unit.

    One :class:`BuildingUnitItem` is yielded per unit, carrying the full
    province/city/district/community/building/unit context plus the floor
    and room-number sets.

    NOTE(review): ``self.clo`` (a Mongo collection wrapper) is not defined
    in this file — presumably attached by ``MongoClientPipeline``; confirm
    before running standalone.
    """
    name = 'beike_building'
    allowed_domains = ['ke.com']
    # Placeholder start URL: parse() ignores the response body and only
    # fans out the real API requests.
    start_urls = ['http://icanhazip.com']
    custom_settings = {
        'CONCURRENT_REQUESTS': 3,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            # Proxy pool from redis; requires RedisConnPipeline to be
            # enabled at the same time.
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,
            'HifoEsf.middlewares.BeikeCookiesMiddleware': 546,
        },
        'ITEM_PIPELINES': {
            # Redis connection; required by the proxy middleware above.
            'HifoEsf.pipelines.RedisConnPipeline': 299,
            'HifoEsf.pipelines.BeikeBdPipeline': 301,
            'HifoEsf.pipelines.MongoClientPipeline': 399,
        },
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 404, 408, 407, 302],
        'RETRY_TIMES': 20,
        'CASE_ID_FILTER': True,  # enable case-id de-duplication
        # NOTE: evaluated once at class-definition time, so this is a single
        # fixed delay sampled from [0.8, 1.2] — it is NOT re-randomized per
        # request (Scrapy's RANDOMIZE_DOWNLOAD_DELAY covers that).
        'DOWNLOAD_DELAY': random.uniform(0.8, 1.2),
        'USER_AGENTS': [
            "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1",
            "Mozilla/5.0 (Linux; Android 8.0.0; SM-G955U Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Mobile Safari/537.36",
            "Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Mobile Safari/537.36"
        ]
    }
    # Landlord-side APIs, all GET (successors of the retired
    # m.ke.com/liverpool/api/yezhu/* endpoints).
    building_url_temp = 'https://landlord.m.ke.com/api/web/aj/resblock/building?resblock_id={}'  # GET
    unit_url_temp = 'https://landlord.m.ke.com/api/web/aj/building/unit?building_id={}'  # GET
    house_url_temp = 'https://landlord.m.ke.com/api/web/aj/unit/floor?unit_id={}'  # GET

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """Accept the city selection from the command line.

        :param crawl: crawl list, e.g. ``-a crawl=重庆，北京``
        :param filter: filter (exclusion) list; the name shadows the
            builtin but is kept — it is the public ``-a filter=`` argument
            name callers already use.
        """
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """Entry point: validate city arguments, then fan out requests."""
        try:
            # Parameter validation.
            cities_li, list_kind = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # Validation failed with a specific reason: close the spider.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Validation failed: close the spider.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # Validation passed: resolve city names against the known list.
            if cities_li:
                cities_df = pd.DataFrame(BEIKE_CITY_LIST)
                # Crawl list.
                if list_kind == 1:
                    # '全国' (nationwide) selects every known city.
                    if '全国' in cities_li:
                        crawl_city_li = cities_df.to_dict(orient='records')
                    else:
                        crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                # Filter list.
                else:
                    # Filtering out '全国' would leave nothing to crawl.
                    if '全国' in cities_li:
                        crawl_city_li = list()
                        self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                    # '全国' not present: keep everything NOT in the filter.
                    else:
                        crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                del cities_df
                if not len(crawl_city_li):
                    self.crawler.engine.close_spider(self, '{} 暂不支持爬取'.format(cities_li))
                else:
                    # Log any requested cities that are not supported.
                    self.check_cities(cities_li, crawl_city_li)
                    # Build the per-city community requests.
                    for esf_request in self.create_city_request(crawl_city_li):
                        yield esf_request
            else:
                logger.error('贝壳二手房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '贝壳二手房城市url列表匹配为空')

    def create_city_request(self, crawl_city_li):
        """Yield one building-list request per community of each city.

        Communities are read from the ``newhours.beike_esf_community``
        Mongo collection via ``self.clo``.

        :param crawl_city_li: list of city dicts with url-building info
        """
        try:
            assert len(crawl_city_li)
        except AssertionError:
            self.crawler.engine.close_spider(self, '贝壳二手房城市爬取列表为空')
        else:
            self.clo.set_db('newhours')
            self.clo.set_col('beike_esf_community')
            # Fetch the communities of each city.
            for crawl_city_dict in crawl_city_li:
                city_name = crawl_city_dict['city_name']
                city_m = crawl_city_dict['service']['ext']['m']
                referer_url = f"https://{city_m['host']}{city_m['uri']}yezhu"
                headers = {
                    'Host': 'landlord.m.ke.com',
                    'Referer': 'https://landlord.m.ke.com/clue/selectAddress?type=building',
                    'ORIGINAL-PAGE-URL': referer_url,
                    'Accept': 'application/json, text/plain, */*',
                    'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
                }
                field_dict = {'_id': 0, 'provinceName': 1, 'cityName': 1, 'districtName': 1, 'name': 1, 'guid': 1, }
                community_li = list(self.clo._collection.find({'cityName': city_name}, field_dict))
                for community_dict in community_li:
                    # Skip synthetic "surrounding area" (周边) districts.
                    if '周边' in community_dict['districtName']:
                        continue
                    community_id = community_dict['guid']
                    # Request the building list of this community.
                    yield scrapy.Request(
                        self.building_url_temp.format(community_id),
                        headers=headers,
                        callback=self.parse_building_li,
                        meta=dict(community_dict=deepcopy(community_dict), headers=deepcopy(headers), ),
                        dont_filter=True,
                        priority=10,
                    )
                del community_li

    def parse_building_li(self, response):
        """Parse a community's building list and request each building's units.

        :param response: JSON response of ``building_url_temp``
        """
        community_dict = copy(response.meta['community_dict'])
        headers = copy(response.meta['headers'])
        try:
            resp = json.loads(response.body.decode())
            # The landlord API is not paginated; the guard below is kept
            # as a safety net from the old paginated endpoint.
            total_page = 1
            building_list = resp['data']
            assert len(building_list), '{}-{}-{} 楼栋列表获取为空'.format(community_dict['cityName'],
                                                                  community_dict['districtName'],
                                                                  community_dict['name'])
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                '{}-{}-{} 楼栋列表获取出错，error:{}'.format(community_dict['cityName'], community_dict['districtName'],
                                                    community_dict['name'], e))
        else:
            if total_page > 1:
                logger.warning('{}-{}-{} 楼栋数超过1页'.format(community_dict['cityName'], community_dict['districtName'],
                                                          community_dict['name']))
            for building_dict in building_list:
                building_id = building_dict['building_id']
                if not building_id:
                    continue
                # Normalize the API's snake_case keys to the camelCase
                # item schema before merging with the community context.
                building_dict_new = dict()
                building_dict_new['buildingId'] = building_dict['building_id']
                building_dict_new['buildingName'] = building_dict['building_name']
                building_dict_f = {**community_dict, **building_dict_new}
                # Request the unit list of this building.
                yield scrapy.Request(
                    self.unit_url_temp.format(building_dict_f['buildingId']),
                    headers=headers,
                    callback=self.parse_unit_li,
                    meta=dict(building_dict_f=deepcopy(building_dict_f), headers=deepcopy(headers), ),
                    priority=20,
                    dont_filter=True,
                )

    def parse_unit_li(self, response):
        """Parse a building's unit list and request each unit's floors/rooms.

        :param response: JSON response of ``unit_url_temp``
        """
        building_dict_f = copy(response.meta['building_dict_f'])
        headers = copy(response.meta['headers'])
        try:
            resp = json.loads(response.body.decode())
            # Non-paginated endpoint; guard kept for safety.
            total_page = 1
            unit_list = resp['data']
            assert len(unit_list), '{}-{}-{}-{} 单元列表获取为空'.format(building_dict_f['cityName'],
                                                                 building_dict_f['districtName'],
                                                                 building_dict_f['name'],
                                                                 building_dict_f['buildingName'], )
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{} 单元列表获取出错，error:{}'.format(building_dict_f['cityName'], building_dict_f['districtName'],
                                                       building_dict_f['name'], building_dict_f['buildingName'], e))
        else:
            if total_page > 1:
                logger.warning(
                    '{}-{}-{}-{} 单元数超过1页'.format(building_dict_f['cityName'], building_dict_f['districtName'],
                                                 building_dict_f['name'], building_dict_f['buildingName'], ))
            for unit_dict in unit_list:
                unit_id = unit_dict['unit_id']
                if not unit_id:
                    continue
                # Normalize snake_case API keys to the camelCase item schema.
                unit_dict_new = dict()
                unit_dict_new['unitId'] = unit_dict['unit_id']
                unit_dict_new['unitName'] = unit_dict['unit_name']
                unit_dict_f = {**building_dict_f, **unit_dict_new}
                # Request the floor/room list of this unit.
                yield scrapy.Request(
                    self.house_url_temp.format(unit_dict_f['unitId']),
                    headers=headers,
                    callback=self.parse_house_li,
                    meta=dict(unit_dict_f=deepcopy(unit_dict_f), ),
                    priority=30,
                    dont_filter=True,
                )
            del headers
            del building_dict_f

    def parse_house_li(self, response):
        """Parse a unit's floor/room list and yield the final item.

        :param response: JSON response of ``house_url_temp``; ``data`` is a
            list of floor dicts, each holding a ``houses`` list.
        """
        unit_dict_f = copy(response.meta['unit_dict_f'])
        try:
            resp = json.loads(response.body.decode())
            # Non-paginated endpoint; guard kept for safety.
            total_page = 1
            house_list = resp['data']
            assert len(house_list), '{}-{}-{}-{}-{} 房号列表获取为空'.format(unit_dict_f['cityName'],
                                                                     unit_dict_f['districtName'],
                                                                     unit_dict_f['name'],
                                                                     unit_dict_f['buildingName'],
                                                                     unit_dict_f['unitName'], )
        except AssertionError as e:
            logger.error(e)
        except Exception as e:
            logger.error(
                '{}-{}-{}-{}-{} 房号列表获取出错，error:{}'.format(unit_dict_f['cityName'], unit_dict_f['districtName'],
                                                          unit_dict_f['name'], unit_dict_f['buildingName'],
                                                          unit_dict_f['unitName'], e))
        else:
            if total_page > 1:
                logger.warning(
                    '{}-{}-{}-{}-{} 房号数超过1页'.format(unit_dict_f['cityName'], unit_dict_f['districtName'],
                                                    unit_dict_f['name'], unit_dict_f['buildingName'],
                                                    unit_dict_f['unitName'], ))
            floor_list = list()       # all floor names of the unit
            room_list = list()        # all room names across every floor
            floor_room_list = list()  # [{floor_name: [room names]}, ...]
            for floor_dict in house_list:
                floor_id = floor_dict['floor_id']
                floor_name = floor_dict['floor_name']
                if not floor_id:
                    continue
                floor_list.append(floor_name)
                house_dict_li = floor_dict['houses']
                floor_room_dict = dict()
                # Renamed from ``house_list``: the original rebinding
                # shadowed the very sequence being iterated above.
                room_names = list()
                for house_dict in house_dict_li:
                    house_id = house_dict['house_id']
                    house_name = house_dict['house_name']
                    if not house_id:
                        continue
                    room_list.append(house_name)
                    room_names.append(house_name)
                floor_room_dict[floor_name] = room_names
                floor_room_list.append(floor_room_dict)
            item_unit = BuildingUnitItem()
            item_unit['provinceName'] = unit_dict_f['provinceName']
            item_unit['cityName'] = unit_dict_f['cityName']
            item_unit['districtName'] = unit_dict_f['districtName']
            item_unit['name'] = unit_dict_f['name']
            item_unit['guid'] = unit_dict_f['guid']
            item_unit['buildingName'] = unit_dict_f['buildingName']
            item_unit['buildingId'] = unit_dict_f['buildingId']
            item_unit['unitName'] = unit_dict_f['unitName']
            item_unit['unitId'] = unit_dict_f['unitId']
            item_unit['roomSet'] = room_list
            item_unit['floorSet'] = floor_list
            item_unit['other'] = floor_room_list
            yield item_unit
            del unit_dict_f

    def get_crawl_or_filter_cities(self) -> tuple:
        """Resolve the city configuration.

        Spider arguments take precedence over settings; a crawl list takes
        precedence over a filter list; with no configuration at all the
        whole country is crawled.

        :return: e.g. ``(['重庆', '成都', '武汉'], 1)`` — the second element
            is 1 for a crawl list, 2 for a filter list.
        """
        param_1 = getattr(self, 'CRAWL_CITIES', None)
        param_2 = getattr(self, 'FILTER_CITIES', None)
        param_3 = self.settings.get('CRAWL_CITIES', None)
        param_4 = self.settings.get('FILTER_CITIES', None)
        if param_1 or param_2:
            if param_1:
                return self.check_params(param_1), 1  # 1 = crawl list
            else:
                return self.check_params(param_2), 2  # 2 = filter list
        elif param_3 or param_4:
            if param_3:
                return self.check_params(param_3), 1
            else:
                return self.check_params(param_4), 2
        else:
            return ['全国'], 1

    def check_params(self, params) -> list:
        """Validate and normalize the city parameter.

        Accepts either a delimiter-separated string (，、；; or ,) or a
        list of strings; every entry must be purely CJK characters.

        :param params: raw parameter value
        :return: e.g. ``['重庆', '成都', '武汉']``
        """
        regex_1 = re.compile('，|、|；|;')
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                try:
                    params = regex_1.sub(',', params).split(',')
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    # Every entry must survive validation, else reject.
                    assert ret_li and len(ret_li) == len(params)
                except AssertionError:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl anjuke -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                try:
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except AssertionError:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    @staticmethod
    def check_cities(cities_li, crawl_city_li):
        """Log requested cities that did not match any supported city.

        :param cities_li: city names the user asked for
        :param crawl_city_li: resolved city dicts actually being crawled
        """
        crawl_df = pd.DataFrame(crawl_city_li)
        city_name_li = crawl_df['city_name'].to_list()
        if len(cities_li) != len(crawl_city_li):
            ret_li = list()
            for city_name in cities_li:
                if city_name not in city_name_li:
                    ret_li.append(city_name)
            logger.error(f"{ret_li} 暂不支持爬取")
