# -*- coding: utf-8 -*-
import scrapy

import re
import os
import random
import logging
import urllib.parse
from copy import copy, deepcopy
from FDC_spider.constants import WH_RM_STATUS_DICT
from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

logger = logging.getLogger(__name__)
BASE_PATH = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)), 'images')


class WuhanSpider(scrapy.Spider):
    """Spider for Wuhan commodity-housing project, building and room data.

    The query site (119.97.201.22:8083) is an ASP.NET WebForms app, hence
    the __VIEWSTATE/__EVENTVALIDATION form round-trips and the GB2312 URL
    encoding used throughout the helpers below.
    """
    name = 'wuhan'
    allowed_domains = ['fgj.wuhan.gov.cn', '119.97.201.22']
    start_urls = ['http://fgj.wuhan.gov.cn/bsfw_44/zxsbhcxxt/spfxm/']
    eg_li_url = 'http://119.97.201.22:8083/search/spfxmcx/spfcx_index.aspx'  # GET/POST  project list url
    bd_li_url_temp = 'http://119.97.201.22:8083/search/spfxmcx/spfcx_lpb.aspx?DengJh={}'  # GET building list url
    permit_url = 'http://119.97.201.22:8083/search/zzzz/zz_ysgsspfzb.aspx'  # GET/POST pre-sale permit url

    def start_requests(self):
        """Kick off the crawl with a GET to the project-list page.

        Uses a random User-Agent from the USER_AGENTS setting and sends the
        portal entry page as the Referer.
        """
        request_headers = {
            'User-Agent': random.choice(self.settings['USER_AGENTS']),
            'Referer': self.start_urls[0],
        }
        yield scrapy.Request(self.eg_li_url, headers=request_headers)

    def parse(self, response):
        """Parse the initial project-list page and request page 1 via postback.

        Reads the ASP.NET form state (__VIEWSTATE / __EVENTVALIDATION / tbtxt)
        and the total page count, then issues the pager POST handled by
        ``parse_project_li``.

        :param response: response of the initial GET to ``eg_li_url``
        """
        # ASP.NET WebForms round-trip state
        VIEWSTATE = response.xpath("//input[@id='__VIEWSTATE']/@value").extract_first()
        EVENTVALIDATION = response.xpath("//input[@id='__EVENTVALIDATION']/@value").extract_first()
        tbtxt = response.xpath("//input[@id='tbtxt']/@value").extract_first()
        try:
            total_page_num = int(response.xpath("//div[@class='pages']/font[1]/text()").extract_first())
        except (TypeError, ValueError):  # missing node -> int(None); garbage text -> ValueError
            total_page_num = 279
            logger.error('总页数获取出错，取默认值为279')
        # Build the pager postback form data for page 1
        page_num = 1
        data = {
            '__VIEWSTATE': VIEWSTATE,
            '__EVENTTARGET': 'AspNetPager1',
            '__EVENTARGUMENT': str(page_num),
            '__EVENTVALIDATION': EVENTVALIDATION,
            # the site expects GB2312 percent-encoding for this field
            'tbtxt': urllib.parse.quote_plus(tbtxt, encoding='gb2312'),
            'DropDownList_xzq': '',
            'xmmc': '',
            'xmdz': '',
            'kfs': '',
            'AspNetPager1_input': str(1),
        }
        headers = {
            'Referer': self.eg_li_url,
            'Host': '119.97.201.22:8083',
            'Origin': 'http://119.97.201.22:8083',
        }
        yield scrapy.FormRequest(
            self.eg_li_url,
            formdata=data,
            headers=headers,
            callback=self.parse_project_li,
            # ints are immutable; no need to deepcopy them into meta
            meta=dict(page_num=page_num, total_page_num=total_page_num)
        )

    def parse_project_li(self, response):
        """Parse one page of the project list and paginate.

        Yields a detail-page request per project row, then re-posts the
        ASP.NET pager form to fetch the next page until ``total_page_num``
        is reached.

        :param response: project-list page (``eg_li_url`` postback result)
        """
        page_num = response.meta['page_num']
        total_page_num = response.meta['total_page_num']
        project_li = response.xpath("//table[@id='tables']//tr[position()>1]")
        if len(project_li):
            # Walk the project rows (the header row is skipped by the xpath)
            for project_tr in project_li:
                item_eg = FdcEstateGuidItem()
                item_eg['projectName'] = project_tr.xpath("./td[1]/a/text()").extract_first()
                item_eg['projectUrl'] = project_tr.xpath("./td[1]/a/@href").extract_first()
                item_eg['projectUrl'] = self._deal_project_url(response.request.url, item_eg['projectUrl'],
                                                               item_eg['projectName'])
                item_eg['totalRoomNum'] = project_tr.xpath("./td[2]/text()").extract_first()
                item_eg['totalSoldResidenceNum'] = self._clear_item(project_tr.xpath("./td[3]/text()").extract_first())
                item_eg['totalSaleableResidenceNum'] = self._clear_item(
                    project_tr.xpath("./td[4]/text()").extract_first())
                item_eg['totalSoldNonResidenceNum'] = self._clear_item(
                    project_tr.xpath("./td[5]/text()").extract_first())
                item_eg['totalSaleableNonResidenceNum'] = self._clear_item(
                    project_tr.xpath("./td[6]/text()").extract_first())
                # Request the project detail page
                headers = {
                    'Referer': self.eg_li_url,
                    'Host': '119.97.201.22:8083',
                }
                if item_eg['projectUrl']:
                    yield scrapy.Request(
                        item_eg['projectUrl'],
                        headers=headers,
                        callback=self.parse_project_detail,
                        meta=dict(item_eg=deepcopy(item_eg)),
                    )
        else:
            # use the module logger (not the root logger) for consistency
            logger.warning('第 {} 页项目列表获取为空'.format(page_num))

        # Pagination
        if page_num < total_page_num:
            page_num += 1
            # ASP.NET WebForms round-trip state from the current page
            VIEWSTATE = response.xpath("//input[@id='__VIEWSTATE']/@value").extract_first()
            EVENTVALIDATION = response.xpath("//input[@id='__EVENTVALIDATION']/@value").extract_first()
            tbtxt = response.xpath("//input[@id='tbtxt']/@value").extract_first()
            data = {
                '__VIEWSTATE': VIEWSTATE,
                '__EVENTTARGET': 'AspNetPager1',
                '__EVENTARGUMENT': str(page_num),
                '__EVENTVALIDATION': EVENTVALIDATION,
                'tbtxt': urllib.parse.quote_plus(tbtxt, encoding='gb2312'),
                'DropDownList_xzq': '',
                'xmmc': '',
                'xmdz': '',
                'kfs': '',
                'AspNetPager1_input': str(page_num - 1),
            }
            headers = {
                'Referer': self.eg_li_url,
                'Host': '119.97.201.22:8083',
                'Origin': 'http://119.97.201.22:8083',
            }
            yield scrapy.FormRequest(
                self.eg_li_url,
                formdata=data,
                headers=headers,
                callback=self.parse_project_li,
                meta=dict(page_num=page_num, total_page_num=total_page_num)
            )

    def parse_project_detail(self, response):
        """Extract project detail fields, then chain into the pre-sale permit
        lookup (which in turn triggers the building-list crawl).

        :param response: project detail page
        """
        item_eg = copy(response.meta['item_eg'])
        # item field -> id of the <span> that holds its text on the page
        span_ids = {
            'projectAddress': 'txt_xmzl',
            'commencementDate': 'txt_kgsj',
            'completionDate': 'txt_jgsj',
            'siteArea': 'txt_ydmj',
            'landUse': 'txt_tdyt',
            'estateTotalArea': 'txt_jzmj',
            'floorAreaRatio': 'txt_rjl',
            'saleDate': 'txt_xssj',
            'landUseRightNum': 'txt_gytdsyzh',
            'constructionPermit': 'txt_sgxkz',
            'qualificationCertificateNo': 'txt_kfqyzzzh',
            'developerName': 'txt_kfqy',
            'filingDepartment': 'txt_xmbajg',
            'landGrade': 'txt_tddj',
        }
        for field, span_id in span_ids.items():
            item_eg[field] = response.xpath("//span[@id='{}']/text()".format(span_id)).extract_first()
        item_eg['districtName'] = self._get_district(item_eg['filingDepartment'])
        item_eg['landUsageTerm'] = [response.xpath("//span[@id='txt_tdsynx']/text()").extract_first(),
                                    response.xpath("//span[@id='txt_tdsynx1']/text()").extract_first()]
        # Build the building-list url from the hidden registration number
        bd_li_url = self._create_building_li_url(response.xpath("//input[@id='hide']/@value").extract_first(),
                                                 item_eg['projectName'])

        # Pre-sale permit lookup, step 1: fetch the query page for its form state
        headers = {
            'Referer': 'http://fgj.wuhan.gov.cn/bsfw_44/zxsbhcxxt/spfjjsyzfysxkz/',
            'Host': '119.97.201.22:8083',
        }
        yield scrapy.Request(
            self.permit_url,
            headers=headers,
            callback=self.parse_primit_params,
            meta=dict(item_eg=deepcopy(item_eg), bd_li_url=deepcopy(bd_li_url)),
            dont_filter=True,
        )

    def parse_primit_params(self, response):
        """Pre-sale permit lookup, step 2: read the ASP.NET form state and
        post the query for this project's name.

        :param response: blank permit query page (``permit_url``)
        """
        item_eg = copy(response.meta['item_eg'])
        bd_li_url = copy(response.meta['bd_li_url'])
        # ASP.NET WebForms round-trip state
        viewstate = response.xpath("//input[@id='__VIEWSTATE']/@value").extract_first()
        eventvalidation = response.xpath("//input[@id='__EVENTVALIDATION']/@value").extract_first()
        form_data = {
            '__VIEWSTATE': viewstate,
            '__EVENTTARGET': '',
            '__EVENTARGUMENT': '',
            '__EVENTVALIDATION': eventvalidation,
            'txt_t': item_eg['projectName'],
            'txt_b': '查询',
            'AspNetPager1_input': str(1),
        }
        yield scrapy.FormRequest(
            self.permit_url,
            headers={'Referer': self.permit_url, 'Host': '119.97.201.22:8083'},
            formdata=form_data,
            callback=self.parse_peimit,
            meta=dict(item_eg=deepcopy(item_eg), bd_li_url=deepcopy(bd_li_url)),
        )

    def parse_peimit(self, response):
        """Pre-sale permit lookup, step 3: collect permit numbers, yield the
        finished project item, then request the building list.

        :param response: permit query result page
        """
        item_eg = copy(response.meta['item_eg'])
        bd_li_url = copy(response.meta['bd_li_url'])
        # rows after the header hold the permit entries
        permit_rows = response.xpath("//tr[position()>1]")
        if permit_rows:
            item_eg['preSalePermit'] = [row.xpath("./td[2]/text()").extract_first() for row in permit_rows]
        else:
            item_eg['preSalePermit'] = None
        yield item_eg

        # follow into the building list, when a url could be built
        if bd_li_url:
            yield scrapy.Request(
                bd_li_url,
                headers={'Referer': item_eg['projectUrl'], 'Host': '119.97.201.22:8083'},
                callback=self.parse_building_li,
                meta=dict(item_eg=deepcopy(item_eg))
            )

    def parse_building_li(self, response):
        """Parse a project's building list: yield one building item per row
        and request each building's room grid.

        :param response: building-list page
        """
        item_eg = copy(response.meta['item_eg'])
        building_rows = response.xpath("//table[2]//tr")
        if len(building_rows) <= 1:
            # only the header row (or nothing) present
            logger.warning('{}  {} 楼栋列表获取为空'.format(response.request.url, item_eg['projectName']))
            return
        # skip the header row at index 0
        for row in building_rows[1:]:
            item_bd = FdcBuildingItem()
            item_bd['districtName'] = item_eg['districtName']
            item_bd['projectName'] = item_eg['projectName']
            item_bd['blockName'] = row.xpath("./td[1]/a/text()").extract_first()
            item_bd['buildingUrl'] = row.xpath("./td[1]/a/@href").extract_first()
            item_bd['buildingUrl'] = self._deal_building_url(response.request.url, deepcopy(item_bd), )
            item_bd['buildingStructure'] = row.xpath("./td[2]/text()").extract_first()
            item_bd['floorTotalNo'] = row.xpath("./td[3]/text()").extract_first()
            item_bd['totalRoomNo'] = row.xpath("./td[4]/text()").extract_first()
            yield item_bd

            # follow into the room grid
            if item_bd['buildingUrl']:
                yield scrapy.Request(
                    item_bd['buildingUrl'],
                    callback=self.parse_room_li,
                    meta=dict(item_bd=deepcopy(item_bd), )
                )

    def parse_room_li(self, response):
        """Parse a building's room grid: one item per room cell, following
        into the room detail page when a usable link exists.

        :param response: room-list page
        """
        item_bd = copy(response.meta['item_bd'])
        floor_rows = response.xpath("//div[@id='fwxx']//tr")
        if not len(floor_rows):
            logger.warning(
                '{}  {}-{} 房号列表获取为空'.format(response.request.url, item_bd['projectName'], item_bd['blockName']))
            return
        for floor_row in floor_rows:
            # first three cells: building / unit / floor numbers
            bd_num = self._clear_num(floor_row.xpath("./td[1]/text()").extract_first())
            unit_num = self._clear_num(floor_row.xpath("./td[2]/text()").extract_first())
            floor_num = self._clear_num(floor_row.xpath("./td[3]/text()").extract_first())
            # remaining cells are the rooms on this floor
            for room_td in floor_row.xpath("./td[position()>3]"):
                item_rm = FdcRoomItem()
                item_rm['projectName'] = item_bd['projectName']
                item_rm['blockName'] = item_bd['blockName']
                item_rm['buildingNo'] = bd_num
                item_rm['unitNo'] = unit_num
                item_rm['roomFloor'] = floor_num
                item_rm['roomNo'] = room_td.xpath("./a/text()").extract_first()
                item_rm['roomUrl'] = room_td.xpath("./a/@href").extract_first()
                item_rm['roomId'] = self._get_room_id(item_rm['roomUrl'])
                item_rm['saleStatus'] = self._get_sale_status(room_td.xpath("./@style").extract_first())
                if item_rm['roomUrl'] and '0000' not in item_rm['roomUrl']:
                    yield scrapy.Request(
                        item_rm['roomUrl'],
                        callback=self.parse_room,
                        meta=dict(item_rm=deepcopy(item_rm)),
                        priority=10,
                    )
                else:
                    # no usable detail page: fill the detail fields with None
                    item_rm['roomLocation'] = None
                    item_rm['forecastBuildArea'] = None
                    item_rm['recordUnitPrice'] = None
                    yield item_rm

    def parse_room(self, response):
        """Parse a room detail page: location, forecast area and the price
        image name (the recorded unit price is only published as an image,
        downloaded here for later OCR).

        :param response: room detail page
        """
        item_rm = copy(response.meta['item_rm'])
        flag = response.xpath("//h1[contains(text(),'暂无预售方案备案价格')]/text()").extract_first()
        if not flag:
            item_rm['roomLocation'] = response.xpath(
                "//td[contains(text(),'房屋座落')]/following-sibling::td[1]/text()").extract_first()
            item_rm['forecastBuildArea'] = response.xpath(
                "//td[contains(text(),'预测面积')]/following-sibling::td[1]/text()").extract_first()
            image_url = response.xpath(
                "//td[contains(text(),'预售方案备案单价')]/following-sibling::td[1]/img/@src").extract_first()
            try:
                image_name = image_url.split("=", maxsplit=1)[1]
                # strip characters not allowed in file names; a character class
                # replaces the original alternation, whose trailing '|' created
                # an unintended empty alternative matching at every position
                image_name = re.sub(r'[<>/\\|:"*?]', '', image_name)
            except (AttributeError, IndexError):  # no image url, or no '=' in it
                item_rm['recordUnitPrice'] = None
            else:
                # stored as the image file name for now; replaced with the real
                # price after the image is downloaded and OCR'd
                item_rm['recordUnitPrice'] = image_name + '.png'
            yield item_rm

            # download the price image
            if item_rm['recordUnitPrice'] and image_url:
                yield scrapy.Request(
                    urllib.parse.urljoin(response.request.url, image_url),
                    callback=self.parse_room_image,
                    meta=dict(image_name=item_rm['recordUnitPrice']),
                    priority=10,
                )
            else:
                logger.error(
                    '{}-{}-{}-{} 房号价格图片url出错'.format(item_rm['projectName'], item_rm['blockName'], item_rm['roomFloor'],
                                                     item_rm['roomNo'], ))
        else:
            logger.warning(
                '{}  {}-{}-{}-{} 房号信息获取失败'.format(response.request.url, item_rm['projectName'], item_rm['blockName'],
                                                  item_rm['roomFloor'], item_rm['roomNo'], ))
            item_rm['roomLocation'] = None
            item_rm['forecastBuildArea'] = None
            item_rm['recordUnitPrice'] = None
            yield item_rm

    def parse_room_image(self, response):
        """Save a room price image under ``images/<spider name>/``.

        :param response: image download response (binary body)
        """
        image_name = response.meta['image_name']
        file_path = os.path.join(BASE_PATH, self.name)
        # makedirs(exist_ok=True) also creates BASE_PATH when missing and is
        # race-free, unlike the original exists()+mkdir() pair
        os.makedirs(file_path, exist_ok=True)
        with open(os.path.join(file_path, image_name), 'wb') as f:
            f.write(response.body)

    @staticmethod
    def _clear_item(_item):
        """
        对数据进行初步清洗
        :param _item: <str> or <int>
        :return: <int> or None
        """
        try:
            item_int = int(_item)
            assert item_int
        except:
            return
        else:
            return item_int

    @staticmethod
    def _deal_project_url(_base, _url, _item):
        """
        处理项目url
        :param _base: 基本的url
        :param _url: 需处理的url
        :param _item: 项目名
        :return: <str> url
        """
        try:
            _url_li = _url.split('=')
            # '硚(硚口区)' 无法进行gb2312正常编码，需特殊处理
            if '硚' not in _url_li[1]:
                _url = _url_li[0] + '=' + urllib.parse.quote(_url_li[1], encoding='gb2312')
            else:
                _url = _url_li[0] + '=%B3~' + urllib.parse.quote(_url_li[1], encoding='gb2312', errors='ignore')
        except:
            logging.error('{}  {} 项目url处理出错'.format(_url, _item))
        else:
            return urllib.parse.urljoin(_base, _url)

    @staticmethod
    def _get_district(_str):
        """
        匹配行政区
        :param _str: <str> or None
        :return: <str> or None
        """
        district_li = ['江汉', '江岸', '硚口', '汉阳', '武昌', '洪山', '青山', '黄陂', '东西湖', '新洲', '蔡甸', '汉南', '江夏', '东湖高新开发',
                       '东湖风景', '经济技术开发', ]
        if _str:
            for district_str in district_li:
                if district_str in _str:
                    return district_str + '区'

    def _create_building_li_url(self, _str, _item):
        """
        构造楼栋列表url
        :param _str: <str>
        :param _item: 项目名
        :return: <str> url
        """
        try:
            # '硚(硚口区)' 无法进行gb2312正常编码，需特殊处理
            if '硚' not in _str:
                _url = self.bd_li_url_temp.format(urllib.parse.quote(_str, encoding='gb2312'))
            else:
                _url = self.bd_li_url_temp.format(
                    '%B3~' + urllib.parse.quote(_str, encoding='gb2312', errors='ignore'))
        except:
            logging.error('{} 项目楼栋列表url构造出错'.format(_item))
        else:
            return _url

    @staticmethod
    def _deal_building_url(_base, _item):
        """
        处理楼栋url
        :param _base: 基本的url
        :param _url: 楼栋对象
        :return: <str> url
        """
        _url = _item['buildingUrl']
        try:
            # '硚(硚口区)' 无法进行gb2312正常编码，需特殊处理
            if '硚' not in _url:
                right_str = _url.split('?')[0] + '?'
                left_li = _url.split('?')[1].split('&')
                params_dict = {i.split('=')[0]: i.split('=')[1] for i in left_li}
                new_url = right_str + urllib.parse.urlencode(params_dict, encoding='gb2312', errors='ignore')
            else:
                regex = re.compile(r'硚')
                new_url = regex.sub('%B3~', _url)
        except:
            logger.error('{}-{} 楼栋url编码出错'.format(_item['projectName'], _item['blockName']))
        else:
            return urllib.parse.urljoin(_base, new_url)

    @staticmethod
    def _clear_num(_str):
        """
        对房号数据进行初步清洗
        :param _str: <str> or None or <int>
        :return: <str> or None
        """
        try:
            num_str = str(_str)
        except:
            return
        else:
            return num_str if '/' not in num_str else None

    @staticmethod
    def _get_sale_status(_str):
        """
        Parse a room cell's inline style into a sale status.

        The text after the colon in the style attribute keys into
        WH_RM_STATUS_DICT.

        :param _str: style attribute <str> or None
        :return: status <str> or None
        """
        if _str is None:
            return None
        ret = re.findall(r':(.*)', _str)
        # exactly one colon-suffixed value is expected; anything else is an
        # unrecognized style (replaces the original assert + bare except)
        if len(ret) != 1:
            return None
        return WH_RM_STATUS_DICT.get(ret[0], None)

    @staticmethod
    def _get_room_id(_str):
        """
        提取房号id
        :param _str: <str> or None
        :return: <str> or None
        """
        try:
            ret = _str.split('=')
            assert ret[1]
        except:
            return
        else:
            return ret[1]
