# -*- coding: utf-8 -*-
import scrapy

import logging
import re
import urllib.parse
from copy import copy, deepcopy
from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

logger = logging.getLogger(__name__)


class DongguanSpider(scrapy.Spider):
    """Spider for the Dongguan presale-housing site (dgfc.dg.gov.cn).

    Crawl flow:
      1. ``parse``: scrape the project rows of one district, then POST the
         ASP.NET form to select the next district option.
      2. ``parse_builiding_li``: collect the building rows of one project and
         build the estate (record-filing) detail URL.
      3. ``parse_estate_detail``: fill and emit the estate item, emit one item
         per building, and schedule each building's room-grid request.
      4. ``parse_room_li``: emit one item per room cell.
    """

    name = 'dongguan'
    allowed_domains = ['dg.gov.cn']
    start_urls = ['http://dgfc.dg.gov.cn/dgwebsite_v2/Vendition/Index.aspx']
    # Project-list URL: first request is a GET, district iteration uses POSTs.
    project_li_url = 'http://dgfc.dg.gov.cn/dgwebsite_v2/Vendition/ProjectInfo.aspx?new=1'
    # Compiled once: extracts the numeric id from "...?id=123"-style URLs.
    _id_re = re.compile(r'id=(\d+)')
    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
            "FDC_spider.middlewares.CustomRetryMiddleware": 500,
            'FDC_spider.middlewares.UserAgentMiddleware': 544,
            # 'FDC_spider.middlewares.DongguanOuterNetProxyMiddleware': 545,  # outer-net proxy for this spider; requires RedisConnPipeline
            # 'FDC_spider.middlewares.DongguanInnerNetProxyMiddleware': 545,  # inner-net proxy for this spider
        }
    }

    def start_requests(self):
        """Kick off the crawl with a GET of the project-list page.

        ``meta['index'] = 0`` means "no district selected yet": ``parse``
        only scrapes rows once a concrete district (1-based XPath index) is
        active.
        """
        headers = {
            'Referer': self.start_urls[0],
        }
        yield scrapy.Request(
            self.project_li_url,
            headers=headers,
            meta={'index': 0},
        )

    def parse(self, response):
        """Scrape one district's project list, then POST for the next one.

        :param response: project-list page; ``response.meta['index']`` is the
            1-based position of the currently selected ``<option>`` in the
            district dropdown (0 on the very first GET response).
        """
        index = response.meta['index']
        if index:
            item_eg = FdcEstateGuidItem()
            district_str = response.xpath(
                "//select[@id='townName']/option[{}]/text()".format(index)).extract_first()
            # Strip the city prefix so only the district name remains.
            item_eg['districtName'] = re.sub(r'东莞市', '', district_str) if district_str else None
            # Project rows (position()>1 skips the table header).
            tr_li = response.xpath("//table[@class='resultTable1']/tr[position()>1]")
            if len(tr_li):
                for tr in tr_li:
                    item_eg['projectName'] = tr.xpath("./td[1]/a/text()").extract_first()
                    item_eg['projectUrl'] = tr.xpath("./td[1]/a/@href").extract_first()
                    # `or ''` guards a missing @href: re.findall(None) raises.
                    projectId_li = self._id_re.findall(item_eg['projectUrl'] or '')
                    item_eg['projectId'] = projectId_li[0] if projectId_li else None
                    item_eg['saleableRoomNo'] = tr.xpath("./td[3]/a/text()").extract_first()
                    yield response.follow(
                        item_eg['projectUrl'],
                        callback=self.parse_builiding_li,
                        # deepcopy: item_eg is mutated on every loop iteration.
                        meta={"item_eg": deepcopy(item_eg)},
                        priority=7,
                    )
            else:
                logger.warning('{}  {} 楼盘列表为空'.format(response.request.url, item_eg['districtName']))

        # Build the POST request selecting the next district option.
        total_index = len(response.xpath("//select[@id='townName']/option"))
        if index < total_index:
            index += 1
            VIEWSTATE = response.xpath("//input[@id='__VIEWSTATE']/@value").extract_first()
            EVENTVALIDATION = response.xpath("//input[@id='__EVENTVALIDATION']/@value").extract_first()
            result_count = response.xpath("//input[@id='resultCount']/@value").extract_first()
            town_name = response.xpath(
                "//select[@id='townName']/option[{}]/@value".format(index)).extract_first()
            headers = {
                'Referer': self.project_li_url,
            }
            # ASP.NET postback payload: __VIEWSTATE / __EVENTVALIDATION must be
            # echoed back from the page we just received.
            data = {
                '__VIEWSTATE': VIEWSTATE,
                '__EVENTVALIDATION': EVENTVALIDATION,
                'townName': town_name,
                'usage': '',
                'projectName': '',
                'projectSite': '',
                'developer': '',
                'area1': '',
                'area2': '',
                'resultCount': result_count,
                'pageIndex': '0',
            }
            yield scrapy.FormRequest(
                self.project_li_url,
                headers=headers,
                formdata=data,
                callback=self.parse,
                meta={'index': index},
                priority=10,
            )

    def parse_builiding_li(self, response):
        """Collect the building list of one project, then fetch the estate
        (BeianView record-filing) detail page.

        NOTE(review): the misspelled method name is kept intentionally —
        serialized/pending requests may reference the callback by name.

        :param response: project detail page; ``meta['item_eg']`` carries the
            partially filled estate item from ``parse``.
        """
        item_eg = copy(response.meta['item_eg'])
        item_eg['projectUrl'] = response.request.url
        try:
            # Reuse the project page's query string (?id=...) for the
            # record-filing detail URL.
            query_str = item_eg['projectUrl'].split('?')[1]
            estate_url_temp = 'http://dgfc.dg.gov.cn/dgwebsite_v2/Vendition/BeianView.aspx?{}'
            item_eg['estateUrl'] = estate_url_temp.format(query_str)
        except Exception as e:
            logger.warning('{}  {}-{} 楼盘详情url构造失败  error：{}'.format(response.request.url, item_eg['districtName'],
                                                                    item_eg['projectName'], e))
        else:
            # Building rows (position()>1 skips the table header).
            tr_li = response.xpath("//table[@id='houseTable_1']//tr[position()>1]")
            item_bd_li = []
            for tr in tr_li:
                item_bd = FdcBuildingItem()
                item_bd['districtName'] = item_eg['districtName']
                item_bd['projectName'] = item_eg['projectName']
                item_bd['projectId'] = item_eg['projectId']
                item_bd['preSalePermit'] = tr.xpath("./td[1]//text()").extract_first()
                item_bd['buildingLocation'] = tr.xpath("./td[2]//text()").extract_first()
                buildingUrl = tr.xpath("./td[1]/a/@href").extract_first()
                # Guard a missing @href: urljoin(base, None) raises TypeError
                # and would previously lose the whole building list.
                item_bd['buildingUrl'] = (
                    urllib.parse.urljoin(response.request.url, buildingUrl) if buildingUrl else None)
                buildingId_li = self._id_re.findall(item_bd['buildingUrl'] or '')
                item_bd['buildingId'] = buildingId_li[0] if buildingId_li else None
                item_bd['floorTotalNo'] = tr.xpath("./td[3]//text()").extract_first()
                item_bd['roomTotalNo'] = tr.xpath("./td[4]//text()").extract_first()
                item_bd['roomUse'] = tr.xpath("./td[5]//text()").extract_first()
                item_bd['buildingTotalArea'] = tr.xpath("./td[6]//text()").extract_first()
                item_bd_li.append(item_bd)
            if not len(tr_li):
                logger.warning(
                    '{} {}-{} 楼栋列表获取为空'.format(response.request.url, item_eg['districtName'], item_eg['projectName']))
            # The site shows at most 20 rows per page; a full page suggests
            # pagination we are not following yet — flag it for review.
            if len(tr_li) == 20:
                logger.warning(
                    '{} {}-{} 楼栋列表可能存在翻页'.format(response.request.url, item_eg['districtName'], item_eg['projectName']))
            yield scrapy.Request(
                item_eg['estateUrl'],
                callback=self.parse_estate_detail,
                meta={'item_eg': deepcopy(item_eg), 'item_bd_li': deepcopy(item_bd_li)},
                priority=8,
            )

    def parse_estate_detail(self, response):
        """Fill the estate item from the BeianView detail page, emit it, then
        emit every building item and schedule its room-grid request.

        :param response: BeianView page; ``meta`` carries the estate item and
            the list of building items built in ``parse_builiding_li``.
        """
        item_eg = copy(response.meta['item_eg'])
        item_bd_li = copy(response.meta['item_bd_li'])
        # Item field -> <span id=...> on the detail page.
        span_ids = {
            'estateName': 'Projectname',
            'estateAddress': 'Address',
            'roomUse': 'yongtu',
            'developerName': 'Companyname',
            'preSalePermit': 'YsZheng',
            'estateTotalArea': 'Totalarea',
            'totalRoomNo': 'TotalRoom',
            'saleableArea': 'KeArea',
            'saleableRoomNo': 'KeRoom',
            'soldArea': 'YiArea',
            'soldRoomNo': 'YiRoom',
            'unsaleableArea': 'NoArea',
            'unsaleableRoomNo': 'NoRoom',
        }
        for field, span_id in span_ids.items():
            item_eg[field] = response.xpath("//span[@id='{}']/text()".format(span_id)).extract_first()
        yield item_eg
        for item_bd in item_bd_li:
            item_bd['estateName'] = item_eg['estateName']
            item_bd['developerName'] = item_eg['developerName']
            yield item_bd
            # buildingUrl can be None when the row had no link (see
            # parse_builiding_li) — nothing to follow in that case.
            if item_bd['buildingUrl']:
                yield scrapy.Request(
                    item_bd['buildingUrl'],
                    callback=self.parse_room_li,
                    meta={'item_bd': deepcopy(item_bd)},
                    priority=9,
                )

    def parse_room_li(self, response):
        """Emit one room item per cell of a building's room grid.

        Each room ``<td>`` carries a ``title`` attribute holding
        "键：值" pairs separated by CR/LF/TAB characters, which are parsed
        into the room item fields.

        :param response: building room-grid page; ``meta['item_bd']`` is the
            building item providing the shared ancestor fields.
        """
        item_bd = copy(response.meta['item_bd'])
        # Floor rows (position()>1 skips the table header).
        tr_li = response.xpath("//table[@id='roomTable']/tr[position()>1]")
        if not len(tr_li):
            logger.warning(
                '{}  {}-{}-{} 房号列表获取为空'.format(response.request.url, item_bd['districtName'], item_bd['projectName'],
                                               item_bd['buildingLocation']))
            return
        for tr in tr_li:
            item_rm = FdcRoomItem()
            item_rm['districtName'] = item_bd['districtName']
            item_rm['estateName'] = item_bd['estateName']
            item_rm['projectName'] = item_bd['projectName']
            item_rm['buildingId'] = item_bd['buildingId']
            item_rm['buildingLocation'] = item_bd['buildingLocation']
            item_rm['roomFloor'] = tr.xpath("./td[1]/text()").extract_first()
            # Room cells of this floor.
            td_li = tr.xpath("./td[2]//tr/td")
            if not len(td_li):
                logger.warning('{}  {}-{}-{}-{} 房号列表获取为空'.format(response.request.url, item_rm['districtName'],
                                                                 item_rm['projectName'],
                                                                 item_rm['buildingLocation'], item_rm['roomFloor']))
                continue
            for td in td_li:
                rm_info_str = td.xpath("./@title").extract_first()
                if not rm_info_str:
                    continue
                item_rm['roomNo'] = td.xpath(".//text()").extract_first()
                try:
                    # Split on runs of CR/LF/TAB. The previous pattern
                    # `[\r\n|\t]` treated `|` as a literal character and
                    # turned every "\r\n" pair into an empty entry whose
                    # split('：')[1] raised, discarding the whole room.
                    # maxsplit=1 keeps values that themselves contain '：'.
                    rm_info_dict = dict(
                        entry.split('：', 1)
                        for entry in re.split(r'[\r\n\t]+', rm_info_str)
                        if '：' in entry
                    )
                    (item_rm['roomArea'], item_rm['roomUse'], item_rm['saleStatus'],
                     item_rm['pubPrice'], item_rm['pubUnitPrice']) = (
                        rm_info_dict.get(key) for key in
                        ['建筑面积', '规划用途', '房屋状态', '公示价格', '公示单价'])
                except Exception as e:
                    logger.error('{}  {}-{}-{}-{}-{} 房号信息获取失败  error:{}'.format(response.request.url,
                                                                                item_rm['districtName'],
                                                                                item_rm['projectName'],
                                                                                item_rm['buildingLocation'],
                                                                                item_rm['roomFloor'],
                                                                                item_rm['roomNo'], e))
                else:
                    yield item_rm