# -*- coding: utf-8 -*-
import scrapy

import logging
import re
from copy import copy, deepcopy
from HifoEsf.items import CaseItem

logger = logging.getLogger(__name__)


class BeikeCjV2Spider(scrapy.Spider):
    """Crawl closed-deal ("chengjiao") records of second-hand housing from ke.com (Beike).

    Flow: national city index -> per-city ershoufang page -> city chengjiao
    list -> per-district list pages. A district with more than 3000 deals
    (the site caps lists at 100 pages x 30 rows) is re-split by business
    area ("shangquan"). Each deal row becomes one ``CaseItem``.

    City scope comes from spider arguments (``-a crawl=...`` /
    ``-a filter=...``) or the ``CRAWL_CITIES`` / ``FILTER_CITIES``
    settings; with no configuration the whole country is crawled.
    """
    name = 'beike_cj_v2'
    allowed_domains = ['ke.com']
    start_urls = ['https://www.ke.com/city/']

    custom_settings = {
        'CONCURRENT_REQUESTS': 4,
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'HifoEsf.middlewares.CustomRetryMiddleware': 500,
            # 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,

            'HifoEsf.middlewares.UserAgentMiddleware': 544,
            # Uses proxies stored in redis; requires RedisConnPipeline to be enabled too.
            'HifoEsf.middlewares.OuterNetProxyMiddleware': 545,
            'HifoEsf.middlewares.BeikeCookiesMiddleware': 546,
        },
        'ITEM_PIPELINES': {
            # Redis connection used by the proxy middleware; enable together with XXXProxyMiddleware.
            'HifoEsf.pipelines.RedisConnPipeline': 299,
            'HifoEsf.pipelines.BiekeCjPipeline': 302,
            'HifoEsf.pipelines.MongoClientPipeline': 399,
        },
        'RETRY_TIMES': 20,
        'DOWNLOAD_DELAY': 1,
    }

    def __init__(self, *args, crawl=None, filter=None, **kwargs):
        """
        :param crawl: city names to crawl, e.g. ``-a crawl=重庆，北京``
        :param filter: city names to skip, e.g. ``-a filter=重庆，北京``
        """
        super().__init__(*args, **kwargs)
        self.CRAWL_CITIES = crawl
        self.FILTER_CITIES = filter

    def parse(self, response, **kwargs):
        """
        Parse the national city index and schedule one request per selected city.

        Validates the crawl/filter configuration first and closes the
        spider when it is invalid or matches no city.
        :param response: response of https://www.ke.com/city/
        :param kwargs:
        :return: yields requests for each city's ershoufang page
        """
        try:
            # todo dedup check
            # assert self.filter_flag != False, f'>>>>{self.name}:案例id去重配置失败<<<<'
            # Parameter validation.
            cities_li, list_type = self.get_crawl_or_filter_cities()
        except AssertionError as e:
            # Validation failed: shut the spider down.
            self.crawler.engine.close_spider(self, e)
        except Exception as e:
            logger.error(e)
            # Unexpected configuration error: shut the spider down.
            self.crawler.engine.close_spider(self, '参数错误')
        else:
            if not cities_li:
                logger.error('贝壳二手房城市url列表匹配为空')
                self.crawler.engine.close_spider(self, '贝壳二手房城市url列表匹配为空')
                return
            headers = {'Referer': 'https://www.ke.com/'}
            # City links are grouped by province (or municipality).
            province_div_li = response.xpath("//div[contains(@data-action,'国内')]//div[@class='city_province']")
            for province_div in province_div_li:
                province_name = province_div.xpath("./div/text()").extract_first().strip()
                for city_data in province_div.xpath("./ul/li"):
                    city_name = city_data.xpath("./a/text()").extract_first()
                    city_url = city_data.xpath("./a/@href").extract_first() + '/ershoufang/'
                    # URLs containing 'fang.' point to the new-home site, not ershoufang.
                    if 'fang.' in city_url:
                        logger.warning(f'{city_name} 没有二手房链接')
                        continue
                    meta = dict(province_name=province_name, city_name=city_name)
                    if list_type == 1:
                        # Crawl-list mode: only the configured cities (or all of '全国').
                        if ('全国' in cities_li) or (city_name in cities_li):
                            yield response.follow(
                                city_url,
                                headers=headers,
                                callback=self.parse_chengjiao_url,
                                meta=meta,
                                dont_filter=True,
                            )
                    elif list_type == 2:
                        # Filter-list mode: every city except the configured ones.
                        if '全国' in cities_li:
                            # '全国' filter: skip the remaining cities of this province.
                            break
                        if city_name not in cities_li:
                            yield response.follow(
                                city_url,
                                headers=headers,
                                callback=self.parse_chengjiao_url,
                                meta=meta,
                                dont_filter=True,
                            )

    def parse_chengjiao_url(self, response):
        """
        Locate the city's closed-deal ("成交") section on its ershoufang
        page and request the first list page (no district yet).
        :param response:
        :return:
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        chengjiao_url = response.xpath("//div[@class='header']//a[contains(text(),'成交')]/@href").extract_first()
        if not chengjiao_url or 'fang.' in chengjiao_url:
            logger.warning(f'{city_name} 没有小区链接')
            return
        yield response.follow(
            chengjiao_url,
            callback=self.parse_chengjiao_li,
            meta=dict(province_name=province_name, city_name=city_name),
            dont_filter=True,
        )

    def parse_chengjiao_li(self, response):
        """
        Parse the city-level chengjiao landing page and fan out one
        request per administrative district.
        :param response:
        :return:
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        chengjiao_total_num = self.get_chengjiao_toal_num(response)
        if isinstance(chengjiao_total_num, int) and chengjiao_total_num:
            # Administrative district links.
            for district_obj in response.xpath("//div[@data-role='ershoufang']/div/a"):
                district_name = district_obj.xpath("./text()").extract_first()
                district_url = district_obj.xpath("./@href").extract_first()
                # '周边' entries are surrounding-area aggregates, not real districts.
                if district_name and '周边' not in district_name:
                    # Request the district's first list page; pg{} template drives pagination.
                    yield response.follow(
                        district_url,
                        callback=self.parse_district_li,
                        meta=dict(province_name=province_name, city_name=city_name,
                                  district_name=district_name,
                                  next_page_url_temp=district_url + 'pg{}/', page_num=1),
                        dont_filter=True,
                    )
        else:
            logger.error('{}-{} 成交案例总数提取出错或者为0'.format(province_name, city_name, ))

    def parse_district_li(self, response):
        """
        Parse one page of a district's deal list.

        Up to 3000 deals (100 pages) the district is paged directly; above
        that the site truncates, so the district is re-split by business
        area from its first page.
        :param response:
        :return:
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        district_name = response.meta['district_name']
        page_num = response.meta['page_num']
        next_page_url_temp = response.meta['next_page_url_temp']
        chengjiao_total_num = self.get_chengjiao_toal_num(response)
        if not isinstance(chengjiao_total_num, int):
            logger.error('{}-{}-{} 成交案例总数提取出错'.format(province_name, city_name, district_name))
            return
        # The site shows at most 100 pages (3000 rows).
        if 0 < chengjiao_total_num <= 3000:
            # Small enough: page through the district directly.
            case_li = response.xpath("//ul[@class='listContent']/li")
            if not len(case_li):
                logger.warning('{}-{}-{}-第{}页 成交案例列表为空'.format(province_name, city_name, district_name, page_num))
                return
            for case_obj in case_li:
                yield self._build_case_item(case_obj, province_name, city_name, district_name)
            # Pagination: a full page holds 30 rows; stop at the 100-page cap.
            next_page_num = page_num + 1
            if len(case_li) >= 30 and next_page_num <= 100:
                yield response.follow(
                    next_page_url_temp.format(next_page_num),
                    callback=self.parse_district_li,
                    meta=dict(province_name=province_name, city_name=city_name,
                              district_name=district_name,
                              next_page_url_temp=next_page_url_temp, page_num=next_page_num),
                    dont_filter=True,
                )
        elif chengjiao_total_num > 3000:
            # Too many rows for direct paging: split by business area,
            # but only from page 1 to avoid duplicate fan-out.
            if page_num == 1:
                for shangquan_obj in response.xpath("//div[@data-role='ershoufang']/div[2]/a"):
                    shangquan_name = shangquan_obj.xpath("./text()").extract_first()
                    shangquan_url = shangquan_obj.xpath("./@href").extract_first()
                    yield response.follow(
                        shangquan_url,
                        callback=self.parse_shangquan_li,
                        meta=dict(province_name=province_name, city_name=city_name,
                                  district_name=district_name, shangquan_name=shangquan_name,
                                  next_page_url_temp=shangquan_url + 'pg{}/', page_num=1),
                        dont_filter=True,
                    )
        else:
            logger.warning('{}-{}-{} 成交案例总数为0'.format(province_name, city_name, district_name))

    def parse_shangquan_li(self, response):
        """
        Parse one page of a business-area ("商圈") deal list.
        :param response:
        :return:
        """
        province_name = response.meta['province_name']
        city_name = response.meta['city_name']
        district_name = response.meta['district_name']
        shangquan_name = response.meta['shangquan_name']
        page_num = response.meta['page_num']
        next_page_url_temp = response.meta['next_page_url_temp']
        chengjiao_total_num = self.get_chengjiao_toal_num(response)
        if not isinstance(chengjiao_total_num, int):
            logger.error('{}-{}-{}-{} 成交案例总数提取出错'.format(province_name, city_name, district_name, shangquan_name))
            return
        if not chengjiao_total_num:
            logger.warning('{}-{}-{}-{} 成交案例总数为0'.format(province_name, city_name, district_name, shangquan_name))
            return
        if chengjiao_total_num > 3000:
            # Even a single business area can overflow the 100-page cap; rows
            # beyond it are unreachable, so just warn and take what we can.
            logger.warning(
                '{}-{}-{}-{} 成交案例总数超过100页限制'.format(province_name, city_name, district_name, shangquan_name))
        case_li = response.xpath("//ul[@class='listContent']/li")
        if not len(case_li):
            logger.warning(
                '{}-{}-{}-{}-第{}页 成交案例列表为空'.format(province_name, city_name, district_name, shangquan_name,
                                                   page_num))
            return
        for case_obj in case_li:
            yield self._build_case_item(case_obj, province_name, city_name, district_name)
        # Pagination: a full page holds 30 rows; stop at the 100-page cap.
        next_page_num = page_num + 1
        if len(case_li) >= 30 and next_page_num <= 100:
            yield response.follow(
                next_page_url_temp.format(next_page_num),
                callback=self.parse_shangquan_li,
                meta=dict(province_name=province_name, city_name=city_name,
                          district_name=district_name, shangquan_name=shangquan_name,
                          next_page_url_temp=next_page_url_temp, page_num=next_page_num),
                dont_filter=True,
            )

    def _build_case_item(self, case_obj, province_name, city_name, district_name):
        """
        Build a CaseItem from one <li> row of a deal list page.
        :param case_obj: selector for the row
        :param province_name: province name
        :param city_name: city name
        :param district_name: administrative district name
        :return: populated CaseItem
        """
        item_case = CaseItem()
        item_case['provinceName'] = province_name  # province
        item_case['cityName'] = city_name  # city
        item_case['districtName'] = district_name  # administrative district
        item_case['caseType'] = '贝壳成交'  # case type
        item_case['dataUrl'] = case_obj.xpath(".//div[@class='title']/a/@href").extract_first()  # case url
        item_case['caseId'] = self.get_case_id(item_case['dataUrl'])  # case id
        item_case['title'] = case_obj.xpath(".//div[@class='title']/a/text()").extract_first()
        item_case['name'], item_case['roomType'], item_case['buildArea'] = self.deal_title(
            item_case['title'])
        house_info_li = case_obj.xpath(".//div[@class='houseInfo']/text()").extract()
        item_case['directionType'], item_case['decorationType'] = self.deal_house_info(house_info_li)
        date_li = case_obj.xpath(".//div[@class='dealDate']/text()").extract()
        item_case['other'] = self.deal_date(date_li)  # deal date
        item_case['totalPrice'] = case_obj.xpath(
            ".//div[@class='totalPrice']/span/text()").extract_first()  # total price
        item_case['price'] = case_obj.xpath(
            ".//div[@class='unitPrice']/span/text()").extract_first()  # unit price
        position_info_li = case_obj.xpath(".//div[@class='positionInfo']/text()").extract()
        item_case['buildYear'], item_case['floor'], item_case['totalFloor'] = self.deal_position_info(
            position_info_li)
        item_case['tag'] = case_obj.xpath(".//span[contains(@class,'deal')]/span/text()").extract()
        return item_case

    def get_crawl_or_filter_cities(self) -> tuple:
        """
        Resolve the configured city list.

        Spider arguments take precedence over settings, and a crawl list
        takes precedence over a filter list at each level.
        :return: (cities, mode) where mode 1 = crawl list, 2 = filter list,
                 e.g. (['重庆', '成都', '武汉'], 1)
        """
        crawl_arg = getattr(self, 'CRAWL_CITIES', None)
        filter_arg = getattr(self, 'FILTER_CITIES', None)
        if crawl_arg:
            return self.check_params(crawl_arg), 1  # 1: the parameter is a crawl list
        if filter_arg:
            return self.check_params(filter_arg), 2  # 2: the parameter is a filter list
        crawl_setting = self.settings.get('CRAWL_CITIES', None)
        filter_setting = self.settings.get('FILTER_CITIES', None)
        if crawl_setting:
            return self.check_params(crawl_setting), 1
        if filter_setting:
            return self.check_params(filter_setting), 2
        # Nothing configured: crawl the whole country.
        return ['全国'], 1

    @staticmethod
    def check_params(params) -> list:
        """
        Validate and normalise the configured city names.

        Accepts a delimiter-separated string or a list of strings; every
        entry must consist solely of Chinese characters.
        :param params: raw configuration value
        :return: e.g. ['重庆', '成都', '武汉']
        :raises AssertionError: when any entry is invalid (callers rely on this type).
        """
        delimiter_regex = re.compile('，|、|；|;')
        chinese_regex = re.compile(r'^[\u4E00-\u9FFF]+$')
        if isinstance(params, str):
            # Normalise every accepted delimiter to ',' before splitting.
            params_li = delimiter_regex.sub(',', params).split(',')
            ret_li = [param.strip() for param in params_li if
                      isinstance(param, str) and param.strip() and chinese_regex.findall(param.strip())]
            if not ret_li or len(ret_li) != len(params_li):
                logger.error('参数输入错误，请重新输入，例如： scrapy crawl beike_cj -a crawl=重庆，北京')
                raise AssertionError(f'错误参数>>>>>>{params}')
            return ret_li
        if isinstance(params, list):
            ret_li = [param.strip() for param in params if
                      isinstance(param, str) and param.strip() and chinese_regex.findall(param.strip())]
            if not ret_li or len(ret_li) != len(params):
                logger.error('参数配置出错，请重新配置，例如： CRAWL_CITIES = ["重庆"，"北京"]')
                raise AssertionError(f'错误参数>>>>>>{params}')
            return ret_li
        raise AssertionError(f'错误参数>>>>>>{params}')

    @staticmethod
    def get_chengjiao_toal_num(response):
        """
        Extract the total number of deals ("共找到 N 套...") from a list page.

        (Method name keeps the historic 'toal' typo for compatibility.)
        :param response:
        :return: the count as int, or None when it cannot be parsed.
        """
        try:
            return int(response.xpath("//div[contains(text(),'共找到')]/span/text()").extract_first())
        except (TypeError, ValueError):
            # extract_first() returned None, or the text was not numeric.
            return None

    @staticmethod
    def deal_title(resp: str):
        """
        Split a listing title like '某小区 2室1厅 89.5㎡' into its parts.
        :param resp: raw title text (may be None)
        :return: (community name, room type, build area); None for missing parts.
        """
        try:
            parts = [p for p in resp.strip().split(' ') if p]
            name = parts[0]
        except (AttributeError, IndexError):
            # resp was None or contained no usable tokens.
            return None, None, None
        room_type = None
        build_area = None
        for part in parts:
            if '室' in part:
                room_type = part
            elif ('㎡' in part) or ('平米' in part):
                build_area = part
        return name, room_type, build_area

    @staticmethod
    def deal_house_info(resp: list):
        """
        Parse the houseInfo text fragments ('朝向 | 装修') into a tuple.
        :param resp: list of text fragments
        :return: (direction, decoration); (None, None) when malformed.
        """
        try:
            fields = [''.join(part.split()) for part in ''.join(resp).split('|')]
        except TypeError:
            # resp was None or contained non-string parts.
            return None, None
        if len(fields) < 2:
            return None, None
        decoration_type = fields[1]
        direction_type = None
        for field in fields:
            # Any field containing a compass character is the orientation.
            if re.findall(r'[东南西北]', field):
                direction_type = field
        return direction_type, decoration_type

    @staticmethod
    def deal_date(resp: list):
        """
        Normalise the deal-date fragments, e.g. '2021.05.01' -> '2021-05-01'.
        :param resp: list of text fragments
        :return: normalised date string, or None when malformed.
        """
        try:
            joined = ''.join(''.join(part.split()) for part in resp)
        except TypeError:
            # resp was None or contained non-string parts.
            return None
        return joined.replace('.', '-')

    @staticmethod
    def get_case_id(case_url: str):
        """
        Extract the numeric case id from a detail URL.
        :param case_url: https://cq.ke.com/chengjiao/106110927069.html
        :return: the id string, or None when extraction fails.
        """
        try:
            if 'chengjiao' in case_url:
                case_id = case_url.split('chengjiao/')[1].split('.html')[0]
            else:
                case_id = case_url.split('ershoufang/')[1].split('.html')[0]
            assert case_id
        except Exception as e:
            # case_url may be None or lack the expected path segment.
            logger.error('{} 案例id提取出错，error:{}'.format(case_url, e))
            return None
        return case_id

    def deal_position_info(self, resp: list):
        """
        Parse the positionInfo text fragments into a tuple.
        :param resp: list of text fragments
        :return: (build year, floor, total floor); Nones when malformed.
        """
        try:
            parts = [''.join(p.split()) for p in ''.join(resp).split(' ')]
        except TypeError:
            # resp was None or contained non-string parts.
            return None, None, None
        build_year = None
        floor = None
        total_floor = None
        for part in parts:
            if re.findall(r'\d+年', part):
                build_year = part
            elif '层' in part:
                floor, total_floor = self.deal_floor(part)
        return build_year, floor, total_floor

    @staticmethod
    def deal_floor(floor: str):
        """
        Parse a floor description like '中楼层(共32层)'.
        :param floor: floor text
        :return: (floor text, total floors); (None, None) when the floor is unknown.
        """
        try:
            if '未知' in floor:
                # Unknown floor: drop both values (matches historic behaviour).
                return None, None
            match = re.search(r'共(.*?)层', floor)
        except TypeError:
            # floor was not a string.
            return floor, None
        total_floor = match.group(1) if match else None
        return floor, total_floor
