# -*- coding: utf-8 -*-
# @Time    : 2022/3/10 9:44
# @Author  : ZSQ
# @Email   : zsq199170918@163.com
# @FileName: anjuke.py
# @Software: PyCharm

import scrapy

import logging
import re
import pandas as pd
from HifoXf.utils.anjuke_cities import ANJUKE_CITY_LIST
from copy import copy, deepcopy
from scrapy_pyppeteer.request import PyppeteerRequest
from HifoXf.items import ProjectItem
from urllib.parse import parse_qsl, urlencode

logger = logging.getLogger(__name__)


class AnjukeSpider(scrapy.Spider):
    """Spider for new-home (新房) project listings on anjuke.com.

    Flow: city index -> per-city list pages (rendered via pyppeteer) ->
    per-project parameter pages. Items are ``ProjectItem`` instances, one per
    pre-sale permit found on the detail page.
    """
    name = 'anjuke'
    allowed_domains = ['anjuke.com', '58.com']
    start_urls = ['https://www.anjuke.com/sy-city.html']

    custom_settings = {
        'CONCURRENT_REQUESTS': 3,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoXf.middlewares.CustomRetryMiddleware': 500,
            # 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
            'HifoXf.middlewares.UserAgentMiddleware': 544,
            'HifoXf.middlewares.OuterNetProxyMiddleware': 545,  # use proxies stored in redis; requires RedisConnPipeline to be enabled as well
            'scrapy_pyppeteer.downloadermiddlewares.PyppeteerMiddleware': 566,
        },
        'DEFAULT_REQUEST_HEADERS': {
            'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
            'accept-encoding': 'gzip, deflate, br',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        },
        'GERAPY_PYPPETEER_HEADLESS': False,
        'GERAPY_ENABLE_REQUEST_INTERCEPTION': True,
        'GERAPY_PYPPETEER_DEVTOOLS': False,
        'GERAPY_PYPPETEER_DUMPIO': True,
        'GERAPY_PYPPETEER_DOWNLOAD_TIMEOUT': 20,
        'COOKIES_ENABLED': False,  # do not carry cookies between requests
        'CASE_ID_FILTER': True,  # whether to de-duplicate projects by case (project) id
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """Store CLI city selections before delegating to ``scrapy.Spider``.

        :param crawl: cities to crawl, from ``-a crawl=重庆，北京`` (str or None)
        :param filter: cities to exclude, from ``-a filter=...`` (str or None)
        """
        # NOTE(review): ``filter`` shadows the builtin, but the name is part of
        # the command-line interface (-a filter=...), so it is kept.
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter
        super().__init__(*args, **kwargs)

    def parse(self, response, **kwargs):
        """Entry point: validate configuration, resolve the city list, emit per-city requests.

        The response body itself is unused; the city universe comes from the
        static ``ANJUKE_CITY_LIST`` table.

        :param response: response of the city index page
        :return: generator of list-page requests (via ``create_city_request``)
        """
        try:
            # De-dup prerequisite check. ``filter_flag`` is presumably set by an
            # external component (pipeline/middleware) -- not visible in this file.
            # Explicit raise instead of ``assert`` so the check survives ``-O``.
            if self.filter_flag == False:
                raise AssertionError(f'>>>>{self.name}:案例id去重配置失败<<<<')
            # Resolve the configured city list and whether it is a crawl or filter list.
            # (local renamed from ``type`` -- it shadowed the builtin)
            cities_li, list_kind = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # De-dup check failed: shut the spider down.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Parameter validation failed: shut the spider down.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            # Validation passed: map the configured names onto the city table.
            if cities_li:
                cities_df = pd.DataFrame(ANJUKE_CITY_LIST)
                if list_kind == 1:  # crawl (inclusion) list
                    if '全国' in cities_li:  # '全国' means every known city
                        crawl_city_li = cities_df.to_dict(orient='records')
                    else:
                        crawl_city_li = cities_df.loc[cities_df.city_name.isin(cities_li)].to_dict(orient='records')
                else:  # filter (exclusion) list
                    if '全国' in cities_li:
                        # Excluding the whole country leaves nothing to crawl.
                        crawl_city_li = list()
                        self.crawler.engine.close_spider(self, '过滤列表包含【全国】')
                    else:
                        crawl_city_li = cities_df.loc[~cities_df.city_name.isin(cities_li)].to_dict(orient='records')

                # Emit one list-page request per selected city.
                for xf_request in self.create_city_request(crawl_city_li):
                    # todo add request-level filtering conditions
                    yield xf_request
                del cities_df
            else:
                logger.error('安居客新房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '安居客新房城市url列表匹配为空')

    def create_city_request(self, crawl_city_li):
        """Build the first new-home list-page request for every crawl city.

        :param crawl_city_li: list of city dicts carrying url-building info
        :return: generator of ``PyppeteerRequest`` for page 1 of each city
        """
        # Explicit emptiness check (the original hid this behind an ``assert``
        # swallowed by a bare ``except:``).
        if not crawl_city_li:
            self.crawler.engine.close_spider(self, '安居客新房城市爬取列表为空')
            return
        for crawl_city_dict in crawl_city_li:
            # Normalize the city dict and derive its new-home url template.
            crawl_city_dict = self.format_crawl_city_dict(crawl_city_dict)
            if not crawl_city_dict:
                continue  # city without a usable url -- skip it
            xf_url = crawl_city_dict['xf_url']
            # List pages start at 1; page 1 uses an empty path segment.
            page_num = 1
            page_param = f'p{page_num}_' if page_num > 1 else ''
            yield PyppeteerRequest(
                xf_url.format(page_param),
                callback=self.parse_project_list,
                pretend=True,
                wait_for='div.key-list>div.item-mod',
                # page_num is an immutable int -- no deepcopy needed for it
                meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), page_num=page_num),
                ignore_resource_types=['image', 'media'],
            )

    def parse_project_list(self, response):
        """Parse one new-home list page: emit a detail request per project, then paginate.

        :param response: list-page response; ``meta`` carries ``crawl_city_dict``
            and the 1-based ``page_num``
        :return: generator of detail-page requests plus one next-page request
        """
        crawl_city_dict = copy(response.meta['crawl_city_dict'])
        province_name = crawl_city_dict['province_name']
        city_name = crawl_city_dict['city_name']
        page_num = response.meta['page_num']  # immutable int -- no copy needed
        # Project cards, excluding brand/ad cards.
        project_div_li = response.xpath(
            "//div[contains(@class,'key-list')]/div[contains(@class,'item-mod') and not(contains(@class,'item-brand'))]")
        if len(project_div_li):
            for project_div in project_div_li:
                item_pt = ProjectItem()
                item_pt['provinceName'] = province_name  # province
                item_pt['cityName'] = city_name  # city
                item_pt['projectName'] = project_div.xpath(
                    "./div/a/span[@class='items-name']/text()").extract_first()  # project name
                item_pt['projectUrl'] = project_div.xpath("./div/a[@class='lp-name']/@href").extract_first()  # project url
                item_pt['projectId'] = self.get_project_id(item_pt['projectUrl'])  # project id
                item_pt['projectStatus'] = project_div.xpath(
                    ".//div[@class='tag-panel']/i[1]/text()").extract_first()  # sales status
                item_pt['propertyType'] = project_div.xpath(
                    ".//div[@class='tag-panel']/i[2]/text()").extract_first()  # property type
                item_pt['projectAddress'] = project_div.xpath(".//a[@class='address']/span/text()").extract()  # address
                huxing_li = project_div.xpath(".//a[@class='huxing']//text()").extract()
                item_pt['mainStructure'], item_pt['buildingArea'] = self.get_mainstructure_and_buildingarea(
                    huxing_li)  # layout / floor area
                item_pt['tags'] = project_div.xpath(".//div[@class='tag-panel']//text()").extract()  # tags / highlights

                # Request the project parameter (detail) page.
                project_detail_url = self.get_project_detail_url(item_pt['projectUrl'], item_pt['projectId'])
                if project_detail_url and item_pt['projectId']:
                    # De-dup by project id via a redis set when CASE_ID_FILTER is on
                    # (sadd returns 1 only for ids not seen before).
                    if (self.settings['CASE_ID_FILTER'] and self.redis_conn.sadd('anjuke_xf_pid',
                                                                                 item_pt['projectId'])) or (
                            not self.settings['CASE_ID_FILTER']):
                        yield PyppeteerRequest(
                            project_detail_url,
                            callback=self.parse_project_detail,
                            pretend=True,
                            wait_for='div.can-left div.des',
                            meta=dict(item_pt=deepcopy(item_pt), ),
                            ignore_resource_types=['image', 'media'],
                        )
                else:
                    logger.error('{}-{}-{} 项目详情url/id提取为空'.format(item_pt['provinceName'], item_pt['cityName'],
                                                                  item_pt['projectName']))
        else:
            # BUG FIX: was ``logging.warning`` (root logger); use the module logger
            # like every other message in this spider.
            logger.warning('{}-{}-第{}页 楼盘列表获取为空'.format(province_name, city_name, page_num))

        # Pagination.
        next_page_url = response.xpath("//a[contains(text(),'下一页')]/@href").extract_first()
        next_page_num = page_num + 1
        if next_page_url:
            yield PyppeteerRequest(
                next_page_url,
                callback=self.parse_project_list,
                pretend=True,
                wait_for='div.key-list>div.item-mod',
                meta=dict(crawl_city_dict=deepcopy(crawl_city_dict), page_num=next_page_num),
                ignore_resource_types=['image', 'media'],
            )
        else:
            logger.warning('{}-{}-第{}页 提取翻页请求url为空'.format(province_name, city_name, next_page_num))

    def parse_project_detail(self, response):
        """Parse a project parameter page and yield one item per pre-sale permit.

        :param response: detail-page response; ``meta['item_pt']`` carries the
            partially-filled item from the list page
        :return: generator of ``ProjectItem``
        """
        item_pt = copy(response.meta['item_pt'])
        item_pt['avgPrice'] = response.xpath(
            "//li/div[contains(text(),'参考单价')]/following-sibling::div[1]/span/text()").extract_first()  # average unit price
        item_pt['totalPrice'] = response.xpath(
            "//li/div[contains(text(),'楼盘总价')]/following-sibling::div[1]/span/text()").extract_first()  # total price
        item_pt['propertyType'] = response.xpath(
            "//li/div[contains(text(),'物业类型')]/following-sibling::div[1]/text()").extract_first()  # property type
        item_pt['developerName'] = response.xpath(
            "//li/div[contains(text(),'开发商')]/following-sibling::div[1]/text()").extract_first()  # developer
        item_pt['locationArea'] = response.xpath(
            "//li/div[contains(text(),'区域位置')]/following-sibling::div[1]//text()").extract()  # district / location
        item_pt['projectAddress'] = response.xpath(
            "//li/div[contains(text(),'楼盘地址')]/following-sibling::div[1]/text()").extract_first()  # project address
        item_pt['saleTel'] = response.xpath(
            "//li/div[contains(text(),'售楼处电话')]/following-sibling::div[1]/span/text()").extract_first()  # sales phone
        item_pt['saleAddress'] = response.xpath(
            "//li/div[contains(text(),'售楼处地址')]/following-sibling::div[1]/text()").extract_first()  # sales office address
        item_pt['openingDate'] = response.xpath(
            "//li/div[contains(text(),'最新开盘')]/following-sibling::div[1]/text()").extract_first()  # opening date
        item_pt['handoverDate'] = response.xpath(
            "//li/div[contains(text(),'交房时间')]/following-sibling::div[1]/text()").extract_first()  # handover date
        item_pt['propertyRight'] = response.xpath(
            "//li/div[contains(text(),'产权年限')]/following-sibling::div[1]/text()").extract_first()  # property-right term
        item_pt['floorAreaRatio'] = response.xpath(
            "//li/div[contains(text(),'容积率')]/following-sibling::div[1]/text()").extract_first()  # floor area ratio
        item_pt['greeningRate'] = response.xpath(
            "//li/div[contains(text(),'绿化率')]/following-sibling::div[1]/text()").extract_first()  # greening rate
        item_pt['planningTotalHouseholds'] = response.xpath(
            "//li/div[contains(text(),'规划户数')]/following-sibling::div[1]/text()").extract_first()  # planned households
        item_pt['parkingSpaceRatio'] = response.xpath(
            "//li/div[contains(text(),'车位比')]/following-sibling::div[1]/text()").extract_first()  # parking ratio
        item_pt['parkingSpacesNum'] = response.xpath(
            "//li/div[contains(text(),'车位数')]/following-sibling::div[1]/text()").extract_first()  # parking spaces
        item_pt['propertyManagementFee'] = response.xpath(
            "//li/div[contains(text(),'物业管理费')]/following-sibling::div[1]/text()").extract_first()  # management fee
        item_pt['propertyCompany'] = response.xpath(
            "//li/div[contains(text(),'物业公司')]/following-sibling::div[1]//text()").extract()  # management company
        # Pre-sale permit table: one <ul> per permit.
        permit_ul_list = response.xpath("//h4[contains(text(),'预售许可证')]/../following-sibling::div/ul")
        if permit_ul_list:
            for permit_ul in permit_ul_list:
                # BUG FIX: yield a distinct item per permit. The original mutated
                # and re-yielded the SAME item instance every iteration, so a
                # deferred pipeline could see later permits overwrite earlier ones.
                permit_item = deepcopy(item_pt)
                permit_item['preSalePermit'] = permit_ul.xpath("./li[1]/div[2]//text()").extract()  # permit number
                permit_item['certDate'] = permit_ul.xpath("./li[2]/div[2]//text()").extract()  # issue date
                permit_item['preSaleBuilding'] = permit_ul.xpath("./li[3]/div[2]//text()").extract()  # buildings covered
                yield permit_item
        else:
            logger.warning(
                '{}-{}-{} 预售许可证列表为空'.format(item_pt['cityName'], item_pt['projectName'], item_pt['projectUrl']))
            item_pt['preSalePermit'] = None  # permit number
            item_pt['certDate'] = None  # issue date
            item_pt['preSaleBuilding'] = None  # buildings covered
            yield item_pt

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        获取配置参数
        :return: (['重庆', '成都', '武汉'], 1)
        """
        param_1 = getattr(self, 'CRAWL_CITIES', None)
        param_2 = getattr(self, 'FILTER_CITIES', None)
        param_3 = self.settings.get('CRAWL_CITIES', None)
        param_4 = self.settings.get('FILTER_CITIES', None)
        if param_1 or param_2:
            if param_1:
                return self.check_params(param_1), 1  # 1代表配置参数是爬取列表
            else:
                return self.check_params(param_2), 2  # 2代表配置参数是过滤列表
        elif param_3 or param_4:
            if param_3:
                return self.check_params(param_3), 1
            else:
                return self.check_params(param_4), 2
        else:
            return ['全国'], 1

    def check_params(self, params) -> list:
        """
        爬取城市参数校验和处理
        :param params:
        :return: ['重庆', '成都', '武汉']
        """
        regex_1 = re.compile('，|、|；|;')
        regex_2 = re.compile(r'^[\u4E00-\u9FFF]+$')
        try:
            if isinstance(params, str):
                try:
                    params = regex_1.sub(',', params).split(',')
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数输入错误，请重新输入，例如： scrapy crawl fangduoduo -a crawl=重庆，北京')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            elif isinstance(params, list):
                try:
                    ret_li = [param.strip() for param in params if
                              isinstance(param, str) and param.strip() and regex_2.findall(param.strip())]
                    assert ret_li and len(ret_li) == len(params)
                except:
                    logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                    self.crawler.engine.close_spider(self, '参数错误')
                else:
                    return ret_li
            else:
                raise AssertionError('参数类型不支持')
        except Exception as e:
            logger.error(e)
            self.crawler.engine.close_spider(self, '参数错误')

    @staticmethod
    def format_crawl_city_dict(crawl_city_dict):
        """
        城市新房url标准化处理
        :param crawl_city_dict:
        :return:
        """
        try:
            sp_zu_url = crawl_city_dict['sp_zu']
            assert sp_zu_url
            base_url = sp_zu_url.split('sydc.anjuke.com/')[0] + 'fang.anjuke.com/'
        except:
            return
        else:
            crawl_city_dict['xf_url'] = base_url + 'loupan/all/{}s6/'
            return crawl_city_dict

    @staticmethod
    def get_project_id(project_id_str: str):
        """
        提取项目id
        :param project_id_str:
        :return:
        """
        try:
            ret = project_id_str.split('.html')[0].split('loupan/')[1]
            assert ret
        except Exception as e:
            logger.error('{} 楼盘id提取出错，error:{}'.format(project_id_str, e))
        else:
            return ret

    @staticmethod
    def get_mainstructure_and_buildingarea(huxing_li: list):
        """
        提取户型和建筑面积
        :param huxing_str:
        :return:
        """
        huxing_str = ''.join([i.strip() for i in huxing_li if i and i.strip()])
        try:
            ret_li = huxing_str.split('建筑面积：')
            if len(ret_li) == 2:
                main_structure = ret_li[0].replace('户型：', '')
                main_structure = main_structure if main_structure else None
                building_area = ret_li[1]
            else:
                main_structure = ret_li[0].replace('户型：', '')
                main_structure = main_structure if main_structure else None
                building_area = None
        except:
            return huxing_str, huxing_str
        else:
            return main_structure, building_area

    @staticmethod
    def get_project_detail_url(project_url: str, project_id):
        """
        构造项目详情url
        :param project_url:
        :param project_id:
        :return:
        """
        try:
            params_dict = dict(parse_qsl(project_url.split('?')[1]))
            params_dict['from'] = 'loupan_tab'
            params_str = urlencode(params_dict)
            project_detail_url = project_url.split('?')[0].split('loupan/')[
                                     0] + f'loupan/canshu-{project_id}.html?' + params_str
            assert project_detail_url
        except Exception as e:
            logger.error(f'{project_url}  楼盘详情url构造出错，error:{e}')
        else:
            return project_detail_url
