# -*- coding: utf-8 -*-
import logging
import re
from copy import copy, deepcopy
from typing import Optional, Tuple
from urllib.parse import parse_qs, urlsplit

import scrapy

from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

# Module-level logger; records extraction failures encountered while crawling.
logger = logging.getLogger(__name__)


class JingmenSpider(scrapy.Spider):
    """Spider for the Jingmen (jmfc.com.cn) new-home pre-sale listings.

    Crawl flow::

        project list (paginated)
            -> pre-sale permit detail
                -> project detail            (yields FdcEstateGuidItem)
                -> building list template    (recovers url params)
                    -> building list         (yields FdcBuildingItem)
                        -> room list         (yields FdcRoomItem)
    """
    name = 'jingmen'
    allowed_domains = ['jmfc.com.cn']
    start_urls = ['https://www.jmfc.com.cn/yushou/index.html']
    project_li_url = 'https://www.jmfc.com.cn/yushou/index.html?page={}'  # GET project list page
    next_page_url = 'https://www.jmfc.com.cn/yushou/index.html?page={}'  # GET project list pagination
    project_detail_url = 'https://www.jmfc.com.cn/loupan/xinxi-{}.html'  # GET project detail page
    building_li_mould_url = 'https://www.jmfc.com.cn/build/buildinfo.html?id={}&act=extra&pg=louzhuang'  # GET building list template page (supplies url params)
    building_li_url = 'https://wap.jmfc.com.cn/pym/louzhuang.php?lp={}&code={}&initialWidth=1200&childId={}'  # GET building list
    room_li_url = 'https://wap.jmfc.com.cn/pym/fangyuan.php?lp={}&lz={}&initialWidth=1200&childId=build_extra'  # GET room list

    def parse(self, response):
        """
        Parse one page of the project list.

        Yields a permit-detail request per project row, plus pagination
        requests for every remaining page (scrapy's duplicate filter
        collapses the repeats issued from later pages).
        :param response: project list page response
        :return: generator of scrapy.Request
        """
        # ``page_num`` is set only on pagination requests; the initial
        # start_urls response carries no meta entry, hence the default.
        page_num = response.meta.get('page_num', 1)
        project_tr_li = response.xpath("//table[@class='match-table']/tbody/tr")
        if not project_tr_li:
            logger.error(f'第{page_num}页 项目列表分组获取为空')
        else:
            for project_tr in project_tr_li:
                item_eg = FdcEstateGuidItem()
                item_eg['certDate'] = project_tr.xpath("./td[1]//span/text()").extract_first()
                item_eg['districtName'] = project_tr.xpath("./td[2]/text()").extract_first()
                item_eg['preSalePermit'] = project_tr.xpath("./td[3]/text()").extract_first()
                item_eg['projectName'] = project_tr.xpath("./td[4]/a/text()").extract_first()
                permit_url = project_tr.xpath("./td[4]/a/@href").extract_first()
                item_eg['developerName'] = project_tr.xpath("./td[5]/a/text()").extract_first()
                item_eg['permitTotalArea'] = project_tr.xpath("./td[6]/text()").extract_first()
                if permit_url:
                    # Fetch the pre-sale permit detail page.
                    yield scrapy.Request(
                        permit_url,
                        callback=self.parse_permit_detail,
                        # deepcopy: the item keeps being filled in downstream callbacks.
                        meta=dict(item_eg=deepcopy(item_eg)),
                        priority=5,
                    )
                else:
                    logger.error('{}-{} 预售许可证详情url提取出错'.format(item_eg['projectName'], item_eg['preSalePermit'], ))

        # Pagination: request every remaining page.
        total_page_str = response.xpath("//a[text()='尾页']/@href").extract_first()
        total_page_num = self._get_total_page_num(total_page_str, page_num)
        for next_num in range(2, total_page_num + 1):
            headers = {
                # Mimic in-site navigation from the previous page.
                "referer": self.next_page_url.format(next_num - 1),
            }
            yield scrapy.Request(
                self.next_page_url.format(next_num),
                headers=headers,
                priority=6,
                meta=dict(page_num=next_num),  # ints are immutable; no copy needed
            )

    @staticmethod
    def _get_total_page_num(_str: Optional[str], page_num: int) -> int:
        """
        Extract the total page count from the "尾页" (last page) link href.

        :param _str: href such as 'index.html?page=115', or None when absent
        :param page_num: page currently being parsed (used only for logging)
        :return: total number of pages; falls back to 115 so pagination
                 still proceeds when extraction fails
        """
        try:
            return int(_str.split('=')[1])
        except (AttributeError, IndexError, ValueError):
            # None href, missing '=', or a non-numeric tail.
            logger.error(f'第{page_num}页 总页数获取失败')
            return 115

    def parse_permit_detail(self, response):
        """
        Parse the pre-sale permit detail page; schedule the project detail
        and building list template requests.
        :param response: permit detail page response
        :return: generator of scrapy.Request
        """
        item_eg = copy(response.meta['item_eg'])
        item_eg['permitBrief'] = self._deal_item_space(
            response.xpath("//div[@class='rule-wrap']/p[1]/text()").extract())
        project_url = response.xpath("//table[@class='match-table']/tbody/tr/td[4]/a/@href").extract_first()
        item_eg['projectId'] = self._get_project_id(project_url)
        if not item_eg['projectId']:
            logger.error('{}-{} 项目id提取出错'.format(item_eg['projectName'], item_eg['preSalePermit']))
            return

        # Fetch the project detail page.
        yield scrapy.Request(
            self.project_detail_url.format(item_eg['projectId']),
            callback=self.parse_project_detail,
            meta=dict(item_eg=deepcopy(item_eg)),
            dont_filter=True,  # other permits of the same project must still reach the detail page
            priority=6,
        )

        # Fetch the building list template page (only source of the url params).
        headers = {
            'referer': 'https://www.jmfc.com.cn/loupan/{}.html'.format(item_eg['projectId'])
        }
        yield scrapy.Request(
            self.building_li_mould_url.format(item_eg['projectId']),
            headers=headers,
            callback=self.parse_building_li_mould,
            meta=dict(item_eg=deepcopy(item_eg)),
            dont_filter=False,  # keep dedup so each building list is fetched once
            priority=7,
        )

    def parse_project_detail(self, response):
        """
        Parse the project detail page and yield the completed estate item.
        :param response: project detail page response
        :return: generator yielding one FdcEstateGuidItem
        """
        item_eg = copy(response.meta['item_eg'])
        item_eg['projectUrl'] = response.request.url

        def value_after(label):
            # Most fields live in the <div> immediately following the <div>
            # that holds the label text.
            return response.xpath(
                "//div[contains(text(),'{}')]/following-sibling::div/text()".format(label)).extract_first()

        item_eg['projectAddress'] = value_after('楼盘地址')
        item_eg['avgPrice'] = response.xpath(
            "//div[contains(text(),'参考均价')]/following-sibling::div/span/text()").extract_first()
        item_eg['salesTel'] = self._deal_item_space(
            response.xpath("//div[contains(text(),'电话')]/following-sibling::div/span//text()").extract())
        item_eg['saleAddress'] = value_after('售楼处地址')
        item_eg['buildingCategory'] = value_after('建筑类型')
        item_eg['propertyType'] = value_after('物业类型')
        item_eg['decorateSituation'] = value_after('交房标准')
        item_eg['floorAreaRatio'] = value_after('容积率')
        item_eg['greeningRate'] = value_after('绿化率')
        item_eg['coverageArea'] = self._deal_item_space(
            response.xpath("//div[contains(text(),'占地面积')]/following-sibling::div/text()").extract())
        item_eg['totalArea'] = self._deal_item_space(
            response.xpath("//div[contains(text(),'建筑面积')]/following-sibling::div/text()").extract())
        item_eg['parkingSpacesNum'] = value_after('停车位')
        item_eg['totalHouseholds'] = value_after('总户数')
        item_eg['propertyCompany'] = value_after('物业公司')
        item_eg['propertyManagementFee'] = value_after('物业费')
        item_eg['projectBrief'] = self._deal_item_space(
            response.xpath("//h2[contains(text(),'项目介绍')]/../following-sibling::div//p/text()").extract())
        yield item_eg

    def parse_building_li_mould(self, response):
        """
        Parse the building list template page to recover the parameters
        (container child id and data-code) needed to build the real
        building list url, then request that list.
        :param response: building list template page response
        :return: generator of scrapy.Request
        """
        item_eg = copy(response.meta['item_eg'])
        child_id = response.xpath("//div[@id='container']/div/@id").extract_first()
        code = response.xpath("//div[@id='container']/div/@data-code").extract_first()
        if not (child_id and code):
            logger.error('{}-{} 楼栋列表url构造参数获取失败'.format(item_eg['projectName'], item_eg['projectId']))
            return
        headers = {
            'Referer': 'https://www.jmfc.com.cn/',
        }
        yield scrapy.Request(
            self.building_li_url.format(item_eg['projectId'], code, child_id),
            headers=headers,
            callback=self.parse_building_li,
            meta=dict(item_eg=deepcopy(item_eg)),
            dont_filter=False,  # keep dedup so the building list is fetched once
            priority=8,
        )

    def parse_building_li(self, response):
        """
        Parse the building list; yield one building item per row and
        schedule the room list request for each building.
        :param response: building list page response
        :return: generator of FdcBuildingItem / scrapy.Request
        """
        item_eg = copy(response.meta['item_eg'])
        bd_tr_li = response.xpath("//table[@class='layui-table']/tbody/tr")
        if not bd_tr_li:
            logger.error('{}-{} 楼栋列表获取为空'.format(item_eg['projectName'], item_eg['projectId'], ))
            return
        for bd_tr in bd_tr_li:
            item_bd = FdcBuildingItem()
            item_bd['projectName'] = item_eg['projectName']
            item_bd['projectId'] = item_eg['projectId']
            item_bd['blockName'] = bd_tr.xpath("./td[1]/text()").extract_first()
            item_bd['preSalePermit'] = bd_tr.xpath("./td[2]/text()").extract_first()
            item_bd['floorTotalNo'] = bd_tr.xpath("./td[3]/text()").extract_first()
            item_bd['totalRoomNo'] = bd_tr.xpath("./td[4]/text()").extract_first()
            item_bd['regulatoryBank'] = bd_tr.xpath("./td[5]/text()").extract_first()
            item_bd['regulatoryBankNum'] = bd_tr.xpath("./td[6]/text()").extract_first()
            room_li_url_str = bd_tr.xpath("./td[7]/a/@href").extract_first()
            # Build the room list url and pull out the building id.
            room_li_url, item_bd['buildingId'] = self._create_room_li_url(room_li_url_str)
            yield item_bd

            if item_bd['buildingId']:
                headers = {
                    'Referer': 'https://www.jmfc.com.cn/',
                }
                yield scrapy.Request(
                    room_li_url,
                    headers=headers,
                    callback=self.parse_room_li,
                    meta=dict(item_bd=deepcopy(item_bd)),
                    priority=9,
                )
            else:
                logger.error('{}-{}-{}-{} 楼栋id提取出错，无法进行房号列表的获取'.format(item_bd['projectName'], item_bd['projectId'],
                                                                       item_bd['blockName'],
                                                                       item_bd['preSalePermit']))

    def parse_room_li(self, response):
        """
        Parse the room list page; yield one room item per cell.
        :param response: room list page response
        :return: generator of FdcRoomItem
        """
        item_bd = copy(response.meta['item_bd'])
        room_li = response.xpath("//li[@class='table-cell']")
        if not room_li:
            logger.error(
                '{}-{}-{}-{} 房号列表获取为空'.format(item_bd['projectName'], item_bd['projectId'], item_bd['preSalePermit'],
                                              item_bd['blockName']))
            return
        for room_info in room_li:
            item_rm = FdcRoomItem()
            item_rm['projectName'] = item_bd['projectName']
            item_rm['projectId'] = item_bd['projectId']
            item_rm['blockName'] = item_bd['blockName']
            item_rm['buildingId'] = item_bd['buildingId']
            # Room attributes are carried as data-* attributes on the cell's <div>.
            item_rm['roomId'] = room_info.xpath("./div/@data-fangjianid").extract_first()
            item_rm['roomNo'] = room_info.xpath("./div/@data-fh").extract_first()
            item_rm['roomArea'] = room_info.xpath("./div/@data-mj").extract_first()
            item_rm['innerArea'] = room_info.xpath("./div/@data-tnmj").extract_first()
            item_rm['roomNature'] = room_info.xpath("./div/@data-fwyt").extract_first()
            item_rm['roomUse'] = room_info.xpath("./div/@data-fwlx").extract_first()
            item_rm['roomFloor'] = room_info.xpath("./div/@data-fwlc").extract_first()
            item_rm['saleStatus'] = room_info.xpath("./div/@data-xszt").extract_first()
            item_rm['recordUnitPrice'] = room_info.xpath("./div/@data-xsjg").extract_first()
            yield item_rm

    @staticmethod
    def _get_project_id(_str: Optional[str]) -> Optional[str]:
        """
        Pull the project id out of a project detail url such as
        '.../loupan/xinxi-123.html'.

        :param _str: project detail href, or None when extraction failed
        :return: the captured id, or None when absent/empty
        """
        if not _str:
            return None
        match = re.search(r'loupan/(.*)\.html', _str)
        return match.group(1) if match and match.group(1) else None

    @staticmethod
    def _deal_item_space(_li: Optional[list]) -> Optional[str]:
        """
        Join a list of extracted text fragments, stripping each fragment
        and dropping blank ones.

        :param _li: list of strings from ``SelectorList.extract()``
        :return: the joined string, or None when nothing remains
        """
        if not _li:
            return None
        joined = ''.join(fragment.strip() for fragment in _li if fragment and fragment.strip())
        return joined or None

    def _create_room_li_url(self, _str: Optional[str]) -> Tuple[Optional[str], Optional[str]]:
        """
        Build the room list url and extract the building id.

        :param _str: link such as
            'https://www.jmfc.com.cn/build/buildinfo.html?id=244946&act=extra&pg=fangyuan&lz=47130'
        :return: (room_li_url, building_id), or (None, None) on any failure
        """
        try:
            params = parse_qs(urlsplit(_str).query)
            project_id = params['id'][0]   # becomes 'lp' in the target url
            building_id = params['lz'][0]  # building id
        except (AttributeError, KeyError, IndexError, TypeError, ValueError):
            # None input, malformed url, or a missing/empty id/lz parameter
            # (parse_qs drops empty values, so those surface as KeyError).
            return None, None
        return self.room_li_url.format(project_id, building_id), building_id
