# -*- coding: utf-8 -*-
import logging
import re
from copy import copy, deepcopy
from typing import Optional

import scrapy

from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)


class XiaoganSpider(scrapy.Spider):
    """Spider for the Xiaogan pre-sale housing site at 219.139.73.205.

    Crawl order: project list -> project detail (one item per pre-sale
    permit) -> building list -> building detail -> room detail.
    """
    name = 'xiaogan'
    allowed_domains = ['219.139.73.205']
    start_urls = ['http://219.139.73.205:8001/Index.aspx']
    project_li_url = 'http://219.139.73.205:8001/More_xm.aspx?page={}'  # GET: paginated project list
    room_detail_url = 'http://219.139.73.205:8001/Pub_House.aspx?xmfwbm={}'  # GET: room detail by room id

    def start_requests(self):
        """
        Kick the crawl off with page 1 of the project list.
        :return: generator yielding a single scrapy.Request
        """
        first_page = 1
        request_headers = {'Referer': self.start_urls[0]}
        yield scrapy.Request(
            self.project_li_url.format(first_page),
            headers=request_headers,
            meta={'page_num': first_page},
        )

    def parse(self, response):
        """
        Parse one page of the project list: emit a detail request per project
        row, then schedule requests for the remaining pages.

        :param response: project-list page (More_xm.aspx)
        :return: generator of scrapy.Request
        """
        page_num = copy(response.meta['page_num'])
        # Project rows follow the header row containing the bold text '项目名称'.
        pt_tr_li = response.xpath("//b[contains(text(),'项目名称')]/../../following-sibling::tr")
        if not pt_tr_li:
            logger.error(f'第{page_num}页，项目列表获取为空')
        else:
            for pt_tr in pt_tr_li:
                item_eg = FdcEstateGuidItem()
                item_eg['projectName'] = self._deal_space(pt_tr.xpath("./td[1]//text()").extract())
                item_eg['projectUrl'] = pt_tr.xpath("./td[1]//a/@href").extract_first()
                item_eg['projectAddress'] = self._deal_space(pt_tr.xpath("./td[2]//text()").extract())
                item_eg['districtName'] = self._deal_space(pt_tr.xpath("./td[3]//text()").extract())
                item_eg['totalRoomNum'] = self._deal_space(pt_tr.xpath("./td[4]//text()").extract())
                item_eg['totalSaleableRoomNum'] = self._deal_space(pt_tr.xpath("./td[5]//text()").extract())
                # Follow to the project detail page.
                yield response.follow(
                    item_eg['projectUrl'],
                    callback=self.parse_project_detail,
                    meta=dict(item_eg=deepcopy(item_eg)),
                    priority=6,
                )

        # Pagination: read the total page count from the pager's <select>.
        # extract_first() may return None (TypeError) or non-numeric text
        # (ValueError); in both cases fall back to a default page count.
        total_page_num = None
        try:
            total_page_num = int(response.xpath(
                "//select[@id='NewProjectList1$AspNetPager1_input']/option[last()]/@value").extract_first())
        except (TypeError, ValueError):
            pass
        if not total_page_num:
            logger.error(f'第{page_num}页，总页数提取出错')
            total_page_num = 14  # default so pagination still proceeds on extraction failure
        for i in range(2, total_page_num + 1):
            yield scrapy.Request(
                self.project_li_url.format(i),
                meta=dict(page_num=i),
                priority=5,
            )

    def parse_project_detail(self, response):
        """
        Parse a project detail page: fill project-level fields, then emit one
        FdcEstateGuidItem per pre-sale permit row and follow each permit's
        building-list link. Emits a single item with null permit fields when
        no permit rows are found.

        :param response: project detail page
        :return: generator of FdcEstateGuidItem / scrapy.Request
        """
        item_eg = copy(response.meta['item_eg'])
        item_eg['projectUrl'] = response.request.url
        item_eg['projectId'] = item_eg['projectUrl'].split('=')[1]
        item_eg['developerName'] = response.xpath("//span[@id='ProjectInfo1_lblCorpName']/text()").extract_first()
        item_eg['propertyType'] = response.xpath("//span[@id='ProjectInfo1_lblProjectType']/text()").extract_first()
        item_eg['commencementDate'] = response.xpath("//span[@id='ProjectInfo1_lblJhkgrq']/text()").extract_first()
        item_eg['completionDate'] = response.xpath("//span[@id='ProjectInfo1_lblJhjfsyrq']/text()").extract_first()
        item_eg['floorAreaRatio'] = response.xpath("//span[@id='ProjectInfo1_lblRjl']/text()").extract_first()
        item_eg['greeningRate'] = response.xpath("//span[@id='ProjectInfo1_lblJdl']/text()").extract_first()
        item_eg['coverageArea'] = response.xpath("//span[@id='ProjectInfo1_lblXmzgm']/text()").extract_first()
        item_eg['totalArea'] = response.xpath("//span[@id='ProjectInfo1_lblZjzmj']/text()").extract_first()
        # Pre-sale permit rows follow the header cell containing '预售许可证'.
        permit_tr_li = response.xpath("//td[contains(text(),'预售许可证')]/../following-sibling::tr")
        if permit_tr_li:
            for permit_tr in permit_tr_li:
                # Work on a fresh copy per row so items already yielded are
                # not mutated by later iterations.
                permit_item = deepcopy(item_eg)
                permit_item['preSalePermit'] = self._deal_space(permit_tr.xpath("./td[1]/text()").extract())
                permit_item['preSaleBlockName'] = self._deal_space(permit_tr.xpath("./td[2]/text()").extract())
                permit_item['certDate'] = self._deal_space(permit_tr.xpath("./td[3]/text()").extract())
                bd_li_url = permit_tr.xpath("./td[4]/a/@href").extract_first()  # building-list url
                permit_item['permitId'] = bd_li_url.split("=")[1]  # pre-sale permit id
                yield permit_item

                # Follow to this permit's building list.
                yield response.follow(
                    bd_li_url,
                    callback=self.parse_building_li,
                    meta=dict(item_eg=deepcopy(permit_item)),
                    priority=7,
                )
        else:
            logger.warning('{} {} 预售许可证列表获取为空'.format(item_eg['projectName'], item_eg['projectUrl'], ))
            item_eg['preSalePermit'] = None
            item_eg['preSaleBlockName'] = None
            item_eg['certDate'] = None
            item_eg['permitId'] = None
            yield item_eg

    def parse_building_li(self, response):
        """
        Parse the building list of one pre-sale permit and follow each
        building's detail link.

        :param response: building-list page
        :return: generator of scrapy.Request
        """
        item_eg = copy(response.meta['item_eg'])
        # Building rows follow the header cell containing '总层数'.
        building_rows = response.xpath("//td[contains(text(),'总层数')]/../following-sibling::tr")
        if not building_rows:
            logger.warning(
                '{}-{} {} 楼栋列表获取失败'.format(item_eg['projectName'], item_eg['preSalePermit'], response.request.url))
            return
        for row in building_rows:
            item_bd = FdcBuildingItem()
            item_bd['projectName'] = item_eg['projectName']
            item_bd['projectId'] = item_eg['projectId']
            item_bd['preSalePermit'] = item_eg['preSalePermit']
            item_bd['permitId'] = item_eg['permitId']
            item_bd['blockName'] = self._deal_space(row.xpath("./td[1]/text()").extract())
            item_bd['floorTotalNo'] = self._deal_space(row.xpath("./td[2]/text()").extract())
            item_bd['totalRoomNo'] = self._deal_space(row.xpath("./td[3]/text()").extract())
            item_bd['saleableRoomNo'] = self._deal_space(row.xpath("./td[4]/text()").extract())
            detail_url = row.xpath("./td[5]/a/@href").extract_first()

            # Follow to the building detail page.
            yield response.follow(
                detail_url,
                callback=self.parse_building_detail,
                meta=dict(item_bd=deepcopy(item_bd)),
                priority=8,
            )

    def parse_building_detail(self, response):
        """
        Parse one building's detail page, emit the FdcBuildingItem, then
        schedule a room-detail request per room-number group found on the page.

        :param response: building detail page
        :return: generator of FdcBuildingItem / scrapy.Request
        """
        item_bd = copy(response.meta['item_bd'])
        # Area fields each live in a <span> with a fixed id.
        area_spans = {
            'buildingTotalArea': 'BuildingInfo1_lblJzmj',
            'buildingResidenceArea': 'BuildingInfo1_lblZzmj',
            'buildingNonResidenceArea': 'BuildingInfo1_lblFzzmj',
            'buildingInnerArea': 'BuildingInfo1_lblSumfwtnmj',
        }
        for field, span_id in area_spans.items():
            item_bd[field] = response.xpath(f"//span[@id='{span_id}']/text()").extract_first()
        item_bd['buildingUrl'] = response.request.url
        item_bd['buildingId'] = item_bd['buildingUrl'].split("=")[1]
        yield item_bd

        # Room groups: each onclick handler embeds the room id in a
        # Pub_House.aspx url fragment.
        for onclick_str in response.xpath("//table[@id]//td[@rowspan]//a/@onclick").extract():
            room_id = self._get_room_id(onclick_str)
            if not room_id:
                logger.error('{}-{}-{} 房号id提取失败'.format(item_bd['projectName'], item_bd['blockName'], onclick_str))
                continue
            # Follow to the room detail page.
            yield scrapy.Request(
                self.room_detail_url.format(room_id),
                callback=self.parse_room_detail,
                priority=9,
                meta=dict(item_bd=deepcopy(item_bd), room_id=room_id),
            )

    def parse_room_detail(self, response):
        """
        Parse a room detail page (Pub_House.aspx) into an FdcRoomItem.

        :param response: room detail page
        :return: generator yielding one FdcRoomItem
        """
        item_bd = copy(response.meta['item_bd'])
        item_rm = FdcRoomItem()
        # Carry identifying fields over from the building item.
        for shared_key in ('projectName', 'projectId', 'blockName', 'buildingId'):
            item_rm[shared_key] = item_bd[shared_key]
        item_rm['roomUrl'] = response.request.url
        item_rm['roomId'] = response.meta['room_id']
        # Plain-text fields each live in a <span> with a fixed id.
        text_spans = {
            'roomLocation': 'HouseInfo1_lblZl',
            'roomFloor': 'HouseInfo1_lblFwlc',
            'roomNo': 'HouseInfo1_lblFwfh',
            'roomUse': 'HouseInfo1_lblFwlx',
            'roomStructure': 'HouseInfo1_lblFwhx',
            'forecastBuildArea': 'HouseInfo1_lblycfwjzmj',
            'forecastInnerArea': 'HouseInfo1_lblycfwtnmj',
            'forecastSharedArea': 'HouseInfo1_lblycfwftmj',
            'orientation': 'HouseInfo1_lblCx',
            'recordUnitPrice': 'HouseInfo1_lblnxjg',
        }
        for field, span_id in text_spans.items():
            item_rm[field] = response.xpath(f"//span[@id='{span_id}']/text()").extract_first()
        # Sale status goes through the status-specific cleaner.
        item_rm['saleStatus'] = self._deal_sale_status(
            response.xpath("//span[@id='HouseInfo1_lblxszt']/text()").extract())
        yield item_rm

    @staticmethod
    def _deal_space(_li: list) -> str or None:
        """
        处理字段空白
        :param _li:
        :return:
        """
        try:
            ret = ''.join([i.strip() for i in _li if i and i.strip()])
            assert ret
        except:
            return
        else:
            return ret

    @staticmethod
    def _get_room_id(_str: str) -> str or None:
        """
        获取房号id
        :param _str:
        :return:
        """
        regex = re.compile(r"Pub_House\.aspx\?xmfwbm=(.*)\'")
        try:
            ret = regex.findall(_str)[0]
            assert ret
        except:
            return
        else:
            return ret

    @staticmethod
    def _deal_sale_status(_li: list) -> str or None:
        """
        获取销售状态
        :param _li:
        :return:
        """
        try:
            ret = ''.join([str(i).strip() for i in _li if i and str(i).strip()])
            assert ret
        except:
            return
        else:
            return ret
