# -*- coding: utf-8 -*-
import logging
import re
from copy import copy, deepcopy
from typing import Optional

import scrapy

from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

logger = logging.getLogger(__name__)


class YueyangSpider(scrapy.Spider):
    """
    Spider for new-house data on yyfdcw.com (Yueyang real-estate portal).

    Crawl order:
        project list -> project detail -> building detail -> room grid -> room detail
    """
    name = 'yueyang'
    allowed_domains = ['yyfdcw.com']
    start_urls = ['http://yyfdcw.com/NewHouse/BuildingList.aspx']
    project_li_url = 'http://yyfdcw.com/NewHouse/BuildingList.aspx'  # project-list url (GET for page 1, POST for the pager)
    custom_settings = {
        # Rebuilt base middleware chain: the stock RetryMiddleware and
        # MetaRefreshMiddleware are disabled (a custom retry is enabled below).
        'DOWNLOADER_MIDDLEWARES_BASE': {
            'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
            'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400,
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': None,
            'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,
            'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,
            'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,
            'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 800,
            'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
            'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,
        },
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            "FDC_spider.middlewares.CustomRetryMiddleware": 500,
            'FDC_spider.middlewares.UserAgentMiddleware': 544,
            'FDC_spider.middlewares.YueyangProxyMiddleware': 545,  # proxy middleware for this spider; requires YueyangRedisConnPipeline to be enabled too
        },
        'ITEM_PIPELINES': {
            'FDC_spider.pipelines.YueyangRedisConnPipeline': 299,  # feeds proxies from redis for this spider; requires YueyangProxyMiddleware to be enabled too
            'FDC_spider.pipelines.FdcSpiderPipeline': 300,
            'FDC_spider.pipelines.YueyangPipeline': 327,
            # 'FDC_spider.pipelines.YueyangCsvPipeline': 377,
            # 'FDC_spider.pipelines.MongoClientPipeline': 350,  # save items to mongo
        },
    }

    def parse(self, response):
        """
        Parse the project (estate) list page.

        Emits one request per project to fetch its detail page, then posts
        the ASP.NET pager form to advance to the next list page.

        :param response: list-page response (GET for page 1, POST afterwards)
        :return: generator of Requests
        """
        # Current page number; the first (GET) response carries no meta.
        page_num = response.meta.get('page_num', 1)
        # Total page count parsed from the pager label (e.g. '.../46页').
        total_page_num_str = self._deal_item(response.xpath("//div[@id='Pager']/div/text()").extract())
        total_page_num = self.get_total_page_num(total_page_num_str)
        if not isinstance(total_page_num, int):
            total_page_num = 46  # fallback so pagination still proceeds when extraction fails
            logger.error(f'第{page_num}页 总页数提取出错，使用默认值')
        # Hidden ASP.NET form fields required by the pager postback.
        view_state = response.xpath("//input[@id='__VIEWSTATE']/@value").extract_first()
        view_state_generator = response.xpath("//input[@id='__VIEWSTATEGENERATOR']/@value").extract_first()
        event_target = 'Pager'
        # Project entries on the current page.
        project_li = response.xpath("//ul[@class='newhul_list']/li")
        if project_li:
            for pt_li in project_li:
                item_eg = FdcEstateGuidItem()
                item_eg['projectName'] = pt_li.xpath("./div[1]//input/@name").extract_first()
                item_eg['projectId'] = pt_li.xpath("./div[1]//input/@id").extract_first()
                item_eg['openingDate'] = pt_li.xpath("./div[@class='listinfo']/ul/li[1]/text()").extract_first()
                item_eg['totalHouseholds'] = self._deal_item(
                    pt_li.xpath("./div[@class='listinfo']/ul/li[3]/text()").extract())
                item_eg['propertyManagementFee'] = self._deal_item(
                    pt_li.xpath("./div[@class='listinfo']/ul/li[4]/text()").extract())
                project_url = pt_li.xpath("./div[1]/a/@href").extract_first()
                # deepcopy the item so later mutation in this loop cannot
                # leak into the queued request's meta (ints need no copy).
                yield response.follow(
                    project_url,
                    callback=self.parse_project_detail,
                    meta=dict(item_eg=deepcopy(item_eg), page_num=page_num),
                    priority=7,
                )
        else:
            logger.warning(f'第{page_num}页 项目列表获取为空')

        # Paginate: post the ASP.NET pager form until the last page.
        if page_num < total_page_num:
            page_num += 1
            data = {
                '__EVENTTARGET': event_target,
                '__EVENTARGUMENT': str(page_num),
                '__VIEWSTATE': view_state,
                '__VIEWSTATEGENERATOR': view_state_generator,
                'ddlYear': '-1',
                'ddlArea': '-1',
                'ddlZhuangtai': '-1',
                'ddlPrice': '-1',
                'ddlWyType': '-1',
                'txtKeyWord': '',
                'Pager_input': str(page_num - 1),
            }
            yield scrapy.FormRequest(
                self.project_li_url,
                formdata=data,
                meta=dict(page_num=page_num),
            )

    def parse_project_detail(self, response):
        """
        Parse a project detail page into an FdcEstateGuidItem, then request
        each building's detail page.

        :param response: project detail page
        :return: generator yielding the project item and building requests
        """
        page_num = response.meta['page_num']
        item_eg = response.meta['item_eg']
        item_eg['projectUrl'] = response.request.url
        item_eg['projectAddress'] = response.xpath("//b[contains(text(),'楼盘位置')]/../text()").extract_first()
        item_eg['districtName'] = response.xpath("//span[@id='lblAreaName']/text()").extract_first()
        item_eg['developerName'] = response.xpath("//span[@id='lblKfs']/text()").extract_first()
        item_eg['propertyCompany'] = response.xpath("//span[@id='lblWgdw']/text()").extract_first()
        item_eg['landUseRightNum'] = response.xpath("//span[@id='Label1']/span/@title").extract_first()
        item_eg['landUsageTerm'] = response.xpath("//span[@id='Label2']/text()").extract_first()
        # Consistency fix: use .extract() (list) like every other _deal_item
        # call; the previous .extract_first() made _deal_item iterate the
        # string character by character (same result, accidental behavior).
        item_eg['buildingCategory'] = self._deal_item(response.xpath("//span[@id='lblJzmj']/text()").extract())
        item_eg['projectUse'] = response.xpath("//span[@id='lblType']/text()").extract_first()
        item_eg['mainArea'] = self._deal_item(response.xpath("//span[@id='lblHxmj']/text()").extract())
        item_eg['greeningRate'] = response.xpath("//span[@id='lblLhl']/text()").extract_first()
        item_eg['totalArea'] = response.xpath("//span[@id='lblZongmianji']/text()").extract_first()
        item_eg['planningTotalBuildingArea'] = response.xpath("//span[@id='Label5']/text()").extract_first()
        item_eg['planningTotalHouseholds'] = response.xpath("//span[@id='lblZhs']/text()").extract_first()
        item_eg['salesTel'] = response.xpath("//span[@id='lblTelephone']/text()").extract_first()
        item_eg['totalRoomNo'] = response.xpath("//div[@class='p10']//tr[2]/td[1]/font/text()").extract_first()
        item_eg['totalSaleableRoomNum'] = response.xpath("//div[@class='p10']//tr[2]/td[2]/font/text()").extract_first()
        item_eg['totalSoldRoomNum'] = response.xpath("//div[@class='p10']//tr[2]/td[4]/font/text()").extract_first()
        item_eg['recordUnitPrice'] = self._deal_item(
            response.xpath("//div[@class='p10']//tr[2]/td[5]/font/text()").extract())
        yield item_eg

        # Buildings table: the first <li> is the header row, skip it.
        building_li = response.xpath("//ul[@class='dongDate']/li[position()>1]")
        if building_li:
            for bd_li in building_li:
                item_bd = FdcBuildingItem()
                item_bd['projectName'] = item_eg['projectName']
                item_bd['projectId'] = item_eg['projectId']
                item_bd['districtName'] = item_eg['districtName']
                item_bd['blockName'] = self._deal_item(bd_li.xpath(".//tr/td[1]/a/text()").extract())
                item_bd['preSalePermit'] = self._deal_item(bd_li.xpath(".//tr/td[2]/a/text()").extract())
                item_bd['certDate'] = self._deal_item(bd_li.xpath(".//tr/td[4]/text()").extract())
                bd_url = bd_li.xpath(".//tr/td[1]/a/@href").extract_first()
                if bd_url:
                    # Fetch the building detail page.
                    yield response.follow(
                        bd_url,
                        callback=self.parse_building_detail,
                        meta=dict(item_bd=deepcopy(item_bd), ),
                        priority=8,
                    )
                else:
                    logger.error(
                        '{} 第{}页-{}-{} 楼栋url提取出错'.format(item_eg['projectUrl'], page_num, item_bd['projectName'],
                                                         item_bd['blockName'], ))
        else:
            logger.warning('{} 第{}页-{} 楼栋列表获取为空'.format(item_eg['projectUrl'], page_num, item_eg['projectName'], ))

    def parse_building_detail(self, response):
        """
        Parse a building detail page into an FdcBuildingItem, then request
        the room-grid iframe.

        :param response: building detail page
        :return: generator yielding the building item and the room-list request
        """
        item_bd = copy(response.meta['item_bd'])
        item_bd['buildingUrl'] = response.request.url
        # Building id is the value of the 'Bid=' query parameter; tolerate a
        # malformed url instead of raising IndexError and losing the item.
        url_parts = item_bd['buildingUrl'].split('Bid=')
        item_bd['buildingId'] = url_parts[1] if len(url_parts) > 1 else None
        item_bd['floorTotalNo'] = self._deal_item(response.xpath("//span[@id='lblFwcs']/text()").extract())
        item_bd['orientation'] = self._deal_item(response.xpath("//span[@id='lblFwcx']/text()").extract())
        item_bd['buildingStructure'] = self._deal_item(response.xpath("//span[@id='lblFwjg']/text()").extract())
        item_bd['buildingTotalArea'] = self._deal_item(response.xpath("//span[@id='lblAllMj']/text()").extract())
        item_bd['commencementDate'] = self._deal_item(response.xpath("//span[@id='lblJgDate']/text()").extract())
        item_bd['completionDate'] = self._deal_item(response.xpath("//span[@id='lblJgrq']/text()").extract())
        item_bd['handoverDate'] = self._deal_item(response.xpath("//span[@id='lblJfrq']/text()").extract())
        item_bd['buildingHeight'] = self._deal_item(response.xpath("//span[@id='lblFwgd']/text()").extract())
        yield item_bd

        # The room grid is rendered inside an iframe; its src may be relative,
        # so join it against the current url (scrapy.Request needs an absolute url).
        room_li_url = response.xpath("//iframe[@id='myiframe']/@src").extract_first()
        if room_li_url:
            yield scrapy.Request(
                response.urljoin(room_li_url),
                callback=self.parse_room_li,
                meta=dict(item_bd=deepcopy(item_bd), ),
                priority=9,
            )
        else:
            logger.error(
                '{} {}-{} 房号列表url提取失败'.format(response.request.url, item_bd['projectName'], item_bd['blockName']))

    def parse_room_li(self, response):
        """
        Parse the room-grid iframe and emit one detail request per room cell.

        :param response: room-grid iframe page
        :return: generator of room-detail Requests
        """
        item_bd = copy(response.meta['item_bd'])
        room_cells = response.xpath("//table//table//table//table//tr/td[2]/input")
        if not room_cells:
            logger.error('{} {}-{} 房号列表提取为空'.format(response.request.url, item_bd['projectName'], item_bd['blockName']))
            return
        for idx, cell in enumerate(room_cells):
            item_rm = FdcRoomItem()
            item_rm['projectName'] = item_bd['projectName']
            item_rm['projectId'] = item_bd['projectId']
            item_rm['blockName'] = item_bd['blockName']
            item_rm['buildingId'] = item_bd['buildingId']
            # Sale status is encoded in the cell's tooltip text.
            item_rm['saleStatus'] = self._get_room_status(cell.xpath("./@title").extract_first())
            # The detail url is embedded in the cell's onclick handler.
            room_url = self._get_room_url(cell.xpath("./@onclick").extract_first())
            if not room_url:
                logger.error(
                    '{} {}-{}-第{}个 房号url提取出错'.format(response.request.url, item_bd['projectName'],
                                                     item_bd['blockName'], idx + 1))
                continue
            yield response.follow(
                room_url,
                callback=self.parse_room_detail,
                meta=dict(item_rm=deepcopy(item_rm), ),
                priority=10,
            )

    def parse_room_detail(self, response):
        """
        Parse a room detail page into an FdcRoomItem.

        Values live in a label/value table: each field is located by the
        label cell's text and read from the following sibling cell.

        :param response: room detail page
        :return: generator yielding one FdcRoomItem
        """
        item_rm = copy(response.meta['item_rm'])
        # Floor is stored as 'nominal/physical' in a single cell.
        floor_text = self._deal_item(
            response.xpath("//td[contains(text(),'名义层')]/following-sibling::td/text()").extract())
        item_rm['nominalFloor'], item_rm['physicsFloor'] = self._deal_room_floor(floor_text)
        # item field -> label text of the table cell preceding the value.
        plain_fields = (
            ('roomNo', '室号'),
            ('roomStructure', '房型'),
            ('roomUse', '用途'),
            ('forecastBuildArea', '预测建筑'),
            ('forecastInnerArea', '预测套内'),
            ('forecastSharedArea', '预测分摊'),
            ('forecastVerandaArea', '预测阳台'),
            ('apportionmentCoefficient', '分摊系数'),
        )
        for field, label in plain_fields:
            item_rm[field] = response.xpath(
                f"//td[contains(text(),'{label}')]/following-sibling::td/text()").extract_first()
        # The declared-price cell holds both unit price and total price.
        price_text = self._deal_item(
            response.xpath("//td[contains(text(),'申报价格')]/following-sibling::td/text()").extract())
        item_rm['recordUnitPrice'], item_rm['recordTotalPrice'] = self._deal_room_price(price_text)
        yield item_rm

    @staticmethod
    def _deal_item(_li):
        """
        处理item字段
        :param _li:
        :return:
        """
        try:
            ret = ''.join([str(i) for i in _li]).strip()
            assert ret and '暂定' not in ret and ret != '0'
        except:
            return
        else:
            return ret

    @staticmethod
    def get_total_page_num(_str) -> int or None:
        """
        提取总页数
        :param _str:
        :return:
        """
        regex = re.compile(r'/(\d+?)页')
        try:
            ret = int(regex.findall(_str)[0])
            assert ret
        except:
            return
        else:
            return ret

    @staticmethod
    def _get_room_status(_str):
        """
        获取房号销售状态
        :param _str:
        :return:
        """
        regex = re.compile(r'颜色.*［(.*?)］', re.S)
        try:
            ret = regex.findall(_str)[0]
            assert ret
        except:
            return
        else:
            return ret

    @staticmethod
    def _get_room_url(_str):
        """
        获取房号url
        :param _str:
        :return:
        """
        try:
            ret = _str.split("'")[1]
            assert ret
        except:
            return
        else:
            return ret

    @staticmethod
    def _deal_room_floor(_str):
        """
        处理房号名义层/实际层
        :param _str:
        :return:
        """
        try:
            ret = _str.split('/')
            assert ret
            tuple_ret = (ret[0] if ret[0] else None, ret[1] if ret[1] else None)
        except:
            return None, None
        else:
            return tuple_ret

    @staticmethod
    def _deal_room_price(_str):
        """
        处理房号价格/总价
        :param _str:
        :return:
        """
        regex = re.compile(r'\d+\.{0,1}\d*')
        try:
            ret = regex.findall(_str)
            assert ret and len(ret) <= 2
            if len(ret) < 2:
                ret.insert(1, None)
            tuple_ret = (ret[0] if ret[0] else None, ret[1] if ret[1] else None)
        except:
            return None, None
        else:
            return tuple_ret