import scrapy

import re
import logging
import pandas as pd
from HifoEsf.utils.anjuke_cities import ANJUKE_CITY_LIST
from HifoEsf.items import CommunityItem, CaseItem
from selenium.webdriver.common.by import By
from time import sleep
import datetime

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


class AnjukeLoupanSpider(scrapy.Spider):
    """
    Spider for second-hand-house community ("楼盘") data on anjuke.com.

    NOTE(review): the parsing code below relies on ``self.driver`` (a
    Selenium WebDriver) and ``self.clo`` (a mongo-like client), neither of
    which is created in this file — presumably attached by a middleware or
    extension; confirm before running standalone.
    """
    name = 'anjuke_loupan'
    allowed_domains = ['anjuke.com']
    # City index page from which crawling starts.
    start_urls = ['https://www.anjuke.com/sy-city.html']

    custom_settings = {
        'CONCURRENT_REQUESTS': 1,  # crawl one request at a time
        'DEFAULT_REQUEST_HEADERS': {
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'accept-encoding': 'gzip, deflate, br',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        },
        'DOWNLOADER_MIDDLEWARES': {
            # Stock retry middleware disabled in favour of the custom one below.
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            # 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,  # use proxies kept in redis; requires RedisConnPipeline to be enabled too
            'HifoEsf.middlewares.AnjukeCookiesMiddleware': 546,
            'scrapy_pyppeteer.downloadermiddlewares.PyppeteerMiddleware': 566,
        },
        'ITEM_PIPELINES': {
            'HifoEsf.pipelines.RedisConnPipeline': 299,  # redis connection for the proxy pool; requires the matching XXXProxyMiddleware
            'HifoEsf.pipelines.PcdPipeline': 300,  # resolves province/city/district ids
            'HifoEsf.pipelines.AnjukeLpPipeline': 301,
            'HifoEsf.pipelines.MongoClientPipeline': 399,
        },
        # NOTE(review): the keys below use the GERAPY_* prefix while the
        # middleware above comes from scrapy_pyppeteer — confirm that
        # middleware actually reads these settings.
        'GERAPY_PYPPETEER_HEADLESS': False,
        'GERAPY_ENABLE_REQUEST_INTERCEPTION': True,
        'GERAPY_PYPPETEER_DEVTOOLS': False,
        'GERAPY_PYPPETEER_DUMPIO': True,
        'GERAPY_PYPPETEER_DOWNLOAD_TIMEOUT': 20,
        # 'COOKIES_ENABLED': False,  # do not send cookies
        # 'CASE_ID_FILTER': True,  # whether to de-duplicate case ids
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        :param crawl: city names to crawl (e.g. ``-a crawl=重庆，北京``)
        :param filter: city names to exclude; ``crawl`` takes precedence
        """
        super().__init__(*args, **kwargs)
        # Stash raw CLI parameters; parsed later by get_crawl_or_filter_cities().
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter

    def parse(self, response, **kwargs):
        """
        Entry callback for the city index page: validate the configured
        crawl/filter parameters, resolve them to concrete city records and
        kick off the per-city crawl.
        """
        try:
            # Parameter validation; returns (city names, 1=crawl / 2=filter).
            # Renamed the second local from ``type`` (shadowed the builtin).
            cities_li, list_kind = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # Validation check failed: shut the spider down with the reason.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Validation failed for any other reason: close the spider.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # Validation passed: resolve city names against the known list.
            if cities_li:
                cities_df = pd.DataFrame(ANJUKE_CITY_LIST)
                # Crawl list
                if list_kind == 1:
                    # "全国" (nationwide) selects every known city.
                    if '全国' in cities_li:
                        crawl_city_li = cities_df.to_dict(orient='records')
                    else:
                        crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                # Filter list
                else:
                    # Filtering out "全国" leaves nothing to crawl.
                    if '全国' in cities_li:
                        crawl_city_li = list()
                        self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                    # Does not contain "全国": keep everything not filtered.
                    else:
                        crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                del cities_df
                # Build the per-city second-hand community requests.
                self.create_city_request(crawl_city_li)
            else:
                logger.error('安居客二手房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '安居客二手房城市url列表匹配为空')

    def create_city_request(self, crawl_city_dict_or_li):
        """
        Drive the second-hand community crawl for each city.

        :param crawl_city_dict_or_li: list of city records containing the
                                      url-building info ('city_url', names)
        """
        # Explicit emptiness check instead of ``assert`` + bare ``except``
        # (asserts are stripped under -O and the bare except hid real errors).
        if not crawl_city_dict_or_li:
            self.crawler.engine.close_spider(self, '安居客二手房城市爬取列表为空')
            return
        for crawl_city_dict in crawl_city_dict_or_li:
            # Normalise the city's second-hand-house url; None means the
            # record lacked a usable 'city_url' and is skipped.
            crawl_city_dict = self.format_crawl_city_dict(crawl_city_dict)
            if crawl_city_dict:
                esf_url = crawl_city_dict['esf_url']
                # Only the first listing page is requested here; pagination is
                # driven by clicking "next" in parse_community_list, so the
                # ``page_num > 1`` branch never fires (kept for clarity of the
                # url scheme: page N would be 'pN/').
                page_num = 1
                page_param = f'p{page_num}/' if page_num > 1 else ''
                # 'o6/' is a sort suffix on anjuke listing urls — TODO confirm
                # which ordering it selects.
                esf_url = esf_url.format(page_param) + 'o6/'
                self.parse_community_list(esf_url, crawl_city_dict)

    def parse_community_list(self, url, crawl_city_dict: dict):
        """
        Walk every district (and, for large districts, every business area)
        of a city's second-hand community listing and extract all pages.

        The original duplicated the whole pagination loop in both branches;
        it is now factored into ``_crawl_all_pages``.

        :param url: first listing page url for the city
        :param crawl_city_dict: city record with province/city names
        """
        province_name = crawl_city_dict.get("province_name")
        city_name = crawl_city_dict.get("city_name")
        self.driver.get(url)
        district_xpath = "//section//ul[@class='region-parents']/li[position()>1]/a"
        section_len = len(self.driver.find_elements(By.XPATH, district_xpath))
        for index in range(section_len):
            # Re-locate the district links after every navigation to avoid
            # stale element references.
            section_li = self.driver.find_elements(By.XPATH, district_xpath)
            district_name = section_li[index].text
            section_li[index].click()
            sleep(1)
            total_num = self.get_total_num(city_name, district_name)
            # Districts above 1250 listings are split by business area
            # (presumably a listing/pagination cap on the site — TODO confirm).
            if total_num > 1250:
                region_xpath = "//div[@class='region-childs']//li[position()>1]/a"
                region_section_len = len(self.driver.find_elements(By.XPATH, region_xpath))
                for region_index in range(region_section_len):
                    # Re-locate for the same staleness reason as above.
                    region_section_li = self.driver.find_elements(By.XPATH, region_xpath)
                    region_name = region_section_li[region_index].text
                    region_section_li[region_index].click()
                    self._crawl_all_pages(province_name, city_name, district_name, region_name)
                # Switch back to the unrestricted ("不限") business area.
                self.driver.find_element(By.XPATH, "//div[@class='region-childs']//li[1]/a").click()
            # Small district: paginate without splitting by business area.
            elif total_num:
                self._crawl_all_pages(province_name, city_name, district_name)
            else:
                # No communities counted in this district.
                continue

    def _crawl_all_pages(self, province_name, city_name, district_name, region_name='不限'):
        """
        Extract every page of the currently selected district/business-area
        listing, then click back to page one so the next selection starts
        from a known state.
        """
        self.scroll_down_page()
        # First page of community details.
        self.parse_community(province_name, city_name, district_name, region_name)
        page_num = 1
        while True:
            try:
                # The "next" button only carries .next-active while clickable;
                # Exception (not a bare except) so Ctrl-C still propagates.
                next_page_ele = self.driver.find_element(By.CSS_SELECTOR, ".pagination .next-active")
                next_page_ele.click()
            except Exception:
                # No active "next" button: last page reached.
                break
            else:
                page_num += 1
                sleep(.5)
                self.scroll_down_page()
                sleep(.5)
                # Community details for the freshly loaded page.
                self.parse_community(province_name, city_name, district_name, region_name, page_num)
        # Reset pagination to the first page.
        if page_num > 1:
            first_page_ele = self.driver.find_element(By.XPATH,
                                                      "//ul[@class='page']/li[contains(@class,'page-item')][1]/a")
            first_page_ele.click()
            sleep(1)

    def parse_community(self, province_name, city_name, district_name, region_name='不限', page_num=1):
        """
        Extract community details from the currently loaded listing page.

        Reads the page's embedded NUXT payload (instead of scraping the DOM)
        and upserts one document per community into
        ``newhours.anjuke_esf_community`` keyed by ``guid``.
        """
        try:
            community_dict_li = self.driver.execute_script(
                "return window.__NUXT__.data[0].communityListData.communities")
            assert len(community_dict_li)
        except Exception as e:
            logger.error("{}-{}-{}-第{}页 楼盘详情信息提取出错，error：{}".format(city_name, district_name, region_name, page_num, e))
        else:
            # One timestamp per page so fetchDate == updateDate for all items
            # (original called datetime.now() twice per item).
            now_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            for community_dict in community_dict_li:
                base = community_dict['base']
                extend = community_dict['extend']
                price_info = community_dict['priceInfo']
                item_eg = CommunityItem()
                item_eg['provinceName'] = province_name
                item_eg['cityName'] = city_name
                item_eg['districtName'] = district_name
                item_eg['shangQuan'] = base.get('tradingAreaName')  # business area
                item_eg['name'] = base.get('name')  # community name
                item_eg['address'] = base.get('address')  # community address
                item_eg['buildYear'] = base.get('completionTime')  # construction year
                item_eg['otherName'] = extend.get('searchNameList')  # alternative names
                item_eg['greeningRate'] = extend.get('landscapingRatio')  # greening rate
                item_eg['guid'] = base.get('id')  # community id
                # jieduLink may be missing; guard before splitting so one bad
                # record cannot abort the whole page.
                jiedu_link = community_dict['otherJumpAction'].get('jieduLink')
                item_eg['communityUrl'] = jiedu_link.split('/jiedu/')[0] if jiedu_link else None  # community url
                item_eg['type'] = extend.get('type')  # property type
                item_eg['buildType'] = base.get('buildTypeStr')  # building category
                item_eg['property'] = base.get('shipTypeStr')  # ownership type
                item_eg['totalRoomCount'] = extend.get('totalHouseHoldNum')  # total households
                item_eg['grossBuildArea'] = extend.get('totalArea')  # gross building area
                item_eg['tag'] = base.get('tags')  # tags
                item_eg['price'] = price_info.get('price')  # unit price
                item_eg['remarks'] = price_info.get('title')  # remarks
                item_eg['monthOnMonth'] = price_info.get('monthChange')  # month-on-month change
                # Coerce to a plain bool (original did `True if x else False`).
                item_eg['elevator'] = bool(extend.get('hasElevator'))
                item_eg['waterPowerSupply'] = extend.get('waterPowerSupply')  # water/power supply
                item_eg['location'] = self.get_location(base.get('lng'),
                                                        base.get('lat'))  # (lng, lat) tuple or None
                item_eg['other'] = community_dict.get('grade')  # extra info
                item_eg['fetchDate'] = now_str
                item_eg['updateDate'] = now_str
                item_eg['sr'] = 'Anjuke'
                item_eg['siteName'] = '安居客'
                # Upsert into mongo, keyed by the community guid.
                self.clo.set_db('newhours')
                self.clo.set_col('anjuke_esf_community')
                filter_item = {'guid': item_eg['guid']}
                self.clo.update_one(filter_item, dict(item_eg))

    def get_total_num(self, city_name, district_name):
        """
        Read the total community count from the listing page counter.

        :param city_name: used only for the warning message
        :param district_name: used only for the warning message
        :return: the count as an int, or 0 (with a warning) when the counter
                 element is missing or carries no digits
        """
        try:
            total_info = self.driver.find_element(By.XPATH, "//span[@class='total-info']")
            digits = re.findall(r"\d+", total_info.text)
            # No digits found counts as "empty" just like a missing element.
            total_num = int(digits[0]) if digits else 0
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt still escapes.
            total_num = 0
        if not total_num:
            logger.warning("{}-{} 楼盘总数获取为空".format(city_name, district_name))
            return 0
        return total_num

    @staticmethod
    def get_location(lng, lat):
        """
        Build a (lng, lat) tuple of floats.

        :param lng: longitude, any value accepted by ``float()``
        :param lat: latitude, any value accepted by ``float()``
        :return: ``(lng, lat)`` as floats, or ``None`` when either value is
                 missing or not numeric
        """
        try:
            return float(lng), float(lat)
        except (TypeError, ValueError):
            # None / non-numeric input — narrowed from a bare except, which
            # also swallowed KeyboardInterrupt and SystemExit.
            return None

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        Resolve the configured city list, checking the spider attributes
        (``-a`` command-line arguments) before the project settings.

        :return: (city names, mode) where mode 1 marks a crawl list and 2 a
                 filter list, e.g. (['重庆', '成都', '武汉'], 1)
        """
        candidate_pairs = (
            # (crawl list, filter list) — attributes win over settings.
            (getattr(self, 'CRAWL_CITIES', None), getattr(self, 'FILTER_CITIES', None)),
            (self.settings.get('CRAWL_CITIES', None), self.settings.get('FILTER_CITIES', None)),
        )
        for crawl_param, filter_param in candidate_pairs:
            if crawl_param:
                return self.check_params(crawl_param), 1  # 1 = crawl list
            if filter_param:
                return self.check_params(filter_param), 2  # 2 = filter list
        # Nothing configured anywhere: crawl the whole country.
        return ['全国'], 1

    @staticmethod
    def format_crawl_city_dict(crawl_city_dict):
        """
        Attach the second-hand-community url template to a city record.

        :param crawl_city_dict: city record; must carry a non-empty 'city_url'
        :return: the same dict extended with an 'esf_url' template, or
                 ``None`` when 'city_url' is missing/empty (callers skip
                 records that come back as None)
        """
        # Explicit lookup instead of try/assert with a bare except, which
        # silently hid every error type.
        city_url = crawl_city_dict.get('city_url')
        if not city_url:
            return None
        # '{}' is later filled with a page fragment such as 'p2/'.
        crawl_city_dict['esf_url'] = city_url + '/community/{}'
        return crawl_city_dict

    def check_params(self, params) -> list:
        """
        Validate and normalise the configured city names.

        The original duplicated the same list validation in both the str and
        list branches; it is now shared, with each branch keeping its own
        user-facing error message.

        :param params: either a delimited string ("重庆，北京") or a list of
                       city-name strings
        :return: list of city names, e.g. ['重庆', '成都', '武汉']; ``None``
                 (after closing the spider) when validation fails
        """
        # Normalise Chinese/alternative delimiters to a plain comma.
        sep_regex = re.compile('，|、|；|;')
        # Accept only names made purely of CJK ideographs.
        han_regex = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                error_msg = '参数输入错误，请重新输入，例如： scrapy crawl anjuke -a crawl=重庆，北京'
                params = sep_regex.sub(',', params).split(',')
            elif isinstance(params, list):
                error_msg = '参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]'
            else:
                raise AssertionError('参数类型不支持')
            ret_li = [param.strip() for param in params if
                      isinstance(param, str) and param.strip() and han_regex.findall(param.strip())]
            # Every entry must survive validation, otherwise reject the lot.
            if ret_li and len(ret_li) == len(params):
                return ret_li
            logger.error(error_msg)
            self.crawler.engine.close_spider(self, '参数错误')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    def scroll_down_page(self, speed=50):
        """
        Incrementally scroll to the bottom of the page so lazily rendered
        content gets a chance to load.

        :param speed: pixels scrolled per step
        """
        offset, doc_height = 0, 1
        # Keep stepping down; the document can grow while scrolling, so the
        # height is re-read after every step.
        while offset <= doc_height:
            offset += speed
            self.driver.execute_script(f"window.scrollTo(0, {offset});")
            doc_height = self.driver.execute_script("return document.body.scrollHeight")
