# -*- coding: utf-8 -*-
# @Time    : 2019/11/15 10:41
# @Author  : ZSQ
# @Email   : zsq199170918@163.com
# @FileName: nanjing.py
# @Software: PyCharm
import scrapy
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_redis.spiders import RedisCrawlSpider

import os
import re
import logging
import urllib.parse
from copy import copy, deepcopy
from FDC_spider.constants import NJ_RM_STATUS_DICT
from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

logger = logging.getLogger(__name__)


class NanjingSpider(RedisCrawlSpider):
    """scrapy-redis distributed spider for new-home sales data on njhouse.com.cn.

    Crawl flow:
        project list page -> project detail (``parse_project``)
        -> pre-sale permit list, paginated (``parse_permit_li``)
        -> building list (``parse_building_li``)
        -> room grid, v2 image-based layout (``parse_room_li_v2``)
        -> room area/price images (``parse_images``).

    ``parse_room_li`` / ``parse_room_detail`` are the pre-2019/12/03 text-based
    room parsers, kept for reference after the site switched area/price cells
    to images (anti-scraping upgrade).
    """

    name = 'nanjing'
    allowed_domains = ['njhouse.com.cn']
    redis_key = 'nanjing'
    # Project list url (GET).  NOTE(review): the query string
    # "saledate=per_name=per_name=per_name=" looks garbled, but it is kept
    # verbatim because the live site may tolerate/expect it -- confirm before
    # cleaning it up.
    prt_li_temp_url = 'http://www.njhouse.com.cn/2019/spf/lists?use=&dist=&saledate=per_name=per_name=per_name=&page={}'
    permit_li_temp_url = 'http://www.njhouse.com.cn/2019/spf/persalereg_list?prjid={}'  # GET, pre-sale permit list
    bd_li_temp_url = 'http://www.njhouse.com.cn/2019/spf/sales?prjid={}'  # GET, building list
    base_url = 'http://www.njhouse.com.cn/2019/'  # base for resolving relative urls

    rules = (
        # Pagination links of the project list; urls are normalized by
        # process_links before being followed.
        Rule(
            LinkExtractor(restrict_xpaths="//div[contains(@class,'navs_block')]/a"),
            process_links="process_links",
        ),
        # Project detail links.
        Rule(
            LinkExtractor(restrict_xpaths="//div[@class='spl_table']//td/span/a"),
            callback='parse_project',
            follow=False,
        ),
    )

    # parse_project: rows of the project detail table, keyed by the label cell.
    # Tuples of (item field, label text, xpath tail, run get_digit_str on value).
    _PROJECT_FIELDS = (
        ('projectAddress', '项目地址', '/text()', False),
        ('projectUse', '用途', '/text()', False),
        ('developerName', '开发企业', '//text()', False),
        ('propertyCompany', '物业管理公司', '/text()', False),
        ('constructionUnit', '施工单位', '/text()', False),
        ('designUnit', '建筑设计单位', '/text()', False),
        ('totalRoomNo', '入网总套数', '/text()', True),
        ('estateTotalArea', '入网总面积', '/text()', True),
        ('unsoldRoomNo', '未售总套数', '/text()', True),
        ('unsoldArea', '未售总面积', '/text()', True),
        ('avgPrice', '项目总均价', '/text()', True),
        ('soldRoomNo', '总成交套数', '/text()', True),
        ('soldArea', '总成交面积', '/text()', True),
    )

    # parse_building_li: td index in the building stats row ->
    # (item field, run get_digit_str on value).
    _BUILDING_CELLS = (
        (1, 'certDate', False),
        (2, 'totalRoomNo', True),
        (3, 'unsoldRoomNo', True),
        (4, 'reserveRoomNo', True),
        (5, 'soldRoomNo', True),
        # td[6] is intentionally not scraped (matches the original behaviour).
        (7, 'avgPrice', True),
        (8, 'transactionRatio', False),
    )

    def parse_project(self, response):
        """Parse a project detail page into an FdcEstateGuidItem, then request
        the project's pre-sale permit list.

        :param response: project detail page (``...?prjid=<id>``)
        :return: yields one scrapy.Request for the permit list
        """
        item_eg = FdcEstateGuidItem()
        item_eg['projectUrl'] = response.request.url
        # Link extraction guarantees a prjid parameter on these urls.
        item_eg['projectId'] = re.findall(r'prjid=(.+)', item_eg['projectUrl'])[0]
        item_eg['projectName'] = response.xpath("//div[contains(@class,'spf_del_title')]/h2/text()").extract_first()
        item_eg['districtName'] = response.xpath("//div[contains(@class,'spf_del_title')]/h2//a/text()").extract_first()
        # Detail table rows share one label-cell / value-cell layout.
        for field, label, tail, to_digit in self._PROJECT_FIELDS:
            value = response.xpath(
                "//td[text()='{}']/following-sibling::td[1]{}".format(label, tail)).extract_first()
            item_eg[field] = self.get_digit_str(value) if to_digit else value
        item_eg['salesTel'] = self.get_digit_str(
            response.xpath("//span[contains(text(),'销售热线')]/text()").extract_first())
        # Follow up with this project's pre-sale permit list.
        yield scrapy.Request(
            self.permit_li_temp_url.format(item_eg['projectId']),
            callback=self.parse_permit_li,
            meta=dict(item_eg=deepcopy(item_eg)),
        )

    def parse_permit_li(self, response):
        """Parse one page of the pre-sale permit list.

        Yields one FdcEstateGuidItem per permit table and accumulates a
        ``certDate -> preSalePermit`` mapping across pages; once the last page
        is reached the mapping is handed to the building-list request so each
        building can be matched to its permit.

        :param response: permit list page (possibly paginated)
        :return: yields items, then either a next-page or a building-list request
        """
        item_eg = copy(response.meta['item_eg'])
        # Fix: was a bare try/assert/except swallowing every exception just to
        # default the dict; an explicit .get() expresses the same thing safely.
        permit_dict = copy(response.meta.get('permit_dict') or {})
        # One <table> per pre-sale permit.
        table_li = response.xpath("//table")
        if table_li:
            for table in table_li:
                item_eg['preSalePermit'] = table.xpath(
                    ".//td[text()='编号']/following-sibling::td[1]/a/text()").extract_first()
                item_eg['districtName'] = table.xpath(
                    ".//td[text()='区属']/following-sibling::td[1]//text()").extract_first()
                item_eg['projectSubName'] = table.xpath(
                    ".//td[text()='项目名称']/following-sibling::td[1]//text()").extract_first()
                item_eg['certDate'] = table.xpath(
                    ".//td[text()='拟开盘时间']/following-sibling::td[1]//text()").extract_first()
                item_eg['projectAddress'] = table.xpath(
                    ".//td[text()='房屋坐落地点']/following-sibling::td[1]//text()").extract_first()
                item_eg['landUseRightNum'] = table.xpath(
                    ".//td[text()='土地使用证号']/following-sibling::td[1]//text()").extract_first()
                item_eg['landUsageTerm'] = table.xpath(
                    ".//td[text()='土地使用年限']/following-sibling::td[1]//text()").extract_first()
                permit_dict[item_eg['certDate']] = item_eg['preSalePermit']
                # Fix: yield a snapshot -- the same item object is mutated on
                # the next iteration, and scrapy may not have consumed it yet.
                yield deepcopy(item_eg)
        else:
            logger.error(
                '{}  {}-{} 许可证列表为空'.format(response.request.url, item_eg['districtName'], item_eg['projectName']))

        # Pagination: keep walking permit pages, carrying the mapping along.
        next_page_url = response.xpath("//a[text()='下一页']/@href").extract_first()
        if next_page_url:
            yield response.follow(
                next_page_url,
                callback=self.parse_permit_li,
                meta=dict(item_eg=deepcopy(item_eg), permit_dict=deepcopy(permit_dict))
            )
        else:
            # Last permit page: move on to the building list for this project.
            item_bd = FdcBuildingItem()
            item_bd['districtName'] = item_eg['districtName']
            item_bd['projectName'] = item_eg['projectName']
            item_bd['projectId'] = item_eg['projectId']
            yield scrapy.Request(
                self.bd_li_temp_url.format(item_eg['projectId']),
                callback=self.parse_building_li,
                meta=dict(item_bd=deepcopy(item_bd), permit_dict=permit_dict),
            )

    def parse_building_li(self, response):
        """Parse the building list of one project.

        Yields one FdcBuildingItem per building plus a request for the
        building's room grid.

        :param response: building list page
        """
        item_bd = copy(response.meta['item_bd'])
        permit_dict = copy(response.meta['permit_dict'])
        # All stats cells of a building live in the same table row; only the
        # td index differs.
        row_tpl = "./table[@class='fdxs_right']//tr[@class='fdxs-tr']//following-sibling::tr[1]/td[{}]/text()"
        # One <li> per building.
        bd_li = response.xpath("//div[@class='fdxs']/ul/li")
        if bd_li:
            for bd in bd_li:
                item_bd['blockName'] = bd.xpath("./div[@class='fdxs_left']/a/text()").extract_first()
                item_bd['buildingUrl'] = bd.xpath("./div[@class='fdxs_left']/a/@href").extract_first()
                item_bd['buildingUrl'] = urllib.parse.urljoin(self.base_url, item_bd['buildingUrl'])
                item_bd['buildingId'] = self.get_building_id(item_bd['buildingUrl'])
                for idx, field, to_digit in self._BUILDING_CELLS:
                    value = bd.xpath(row_tpl.format(idx)).extract_first()
                    item_bd[field] = self.get_digit_str(value) if to_digit else value
                # Match the permit issued on this building's cert date.
                item_bd['preSalePermit'] = permit_dict.get(item_bd['certDate'], None)
                # Fix: yield a snapshot -- item_bd is mutated on the next
                # iteration (see parse_permit_li).
                yield deepcopy(item_bd)
                # Room grid for this building.  parse_room_li was deprecated
                # on 2019/12/03 after the site's anti-scraping upgrade.
                yield scrapy.Request(
                    item_bd['buildingUrl'],
                    callback=self.parse_room_li_v2,
                    meta=dict(item_bd=deepcopy(item_bd)),
                )
        else:
            logger.error(
                '{}  {}-{} 楼栋列表为空'.format(response.request.url, item_bd['districtName'], item_bd['projectName']))

    @staticmethod
    def _image_name(item_rm, suffix):
        """Build the file stem for a room's area ('S') or price ('P') image."""
        return '_'.join(
            [item_rm['projectId'], item_rm['buildingId'], item_rm['roomFloor'], item_rm['roomNo'], suffix])

    def _image_request(self, url, image_name, project_id):
        """Build the download request for one area/price image."""
        return scrapy.Request(
            url,
            callback=self.parse_images,
            meta=dict(image_name=image_name, file_name=project_id),
        )

    def parse_room_li_v2(self, response):
        """Parse the v2 room grid of one building (version 2.0).

        Since 2019/12/03 the site renders area/price as images: for image
        cells we yield image-download requests and store the image name in the
        item so the saved file can be matched back later; cells that still
        contain text are parsed directly.

        :param response: room grid page of one building
        :return: yields FdcRoomItem objects and image requests
        """
        item_bd = copy(response.meta['item_bd'])
        # <td> cells that contain a non-empty room link.
        td_li = response.xpath("//table[@class='ck_table']//tr[td]/td/a[text()!='']/..")
        if td_li:
            for td in td_li:
                item_rm = FdcRoomItem()
                item_rm['projectId'] = item_bd['projectId']
                item_rm['blockName'] = item_bd['blockName']
                item_rm['buildingId'] = item_bd['buildingId']
                item_rm['roomNo'] = td.xpath("./a[1]/text()").extract_first()
                item_rm['roomUse'] = td.xpath("./@title").extract_first()
                item_rm['roomUrl'] = self.get_full_url_v2(self.base_url, td.xpath("./a[1]/@href").extract_first())
                item_rm['roomFloor'] = self.get_floor_num_v2(item_rm['roomUrl'])
                # The td's css class encodes the sale status.
                item_rm['saleStatus'] = self.get_room_status_v2(td.xpath("./@class").extract_first())
                area_url = self.get_full_url_v2(self.base_url, td.xpath("./a[2]/img[1]/@src").extract_first())
                price_url = self.get_full_url_v2(self.base_url, td.xpath("./a[2]/img[2]/@src").extract_first())
                if area_url or price_url:
                    # Image-rendered cell: record the image name per value and
                    # fetch the image(s); a missing image leaves None.
                    item_rm['roomArea'] = None
                    item_rm['unitPrice'] = None
                    if area_url:
                        image_name = self._image_name(item_rm, 'S')
                        item_rm['roomArea'] = image_name
                        yield self._image_request(area_url, image_name, item_rm['projectId'])
                    if price_url:
                        image_name = self._image_name(item_rm, 'P')
                        item_rm['unitPrice'] = image_name
                        yield self._image_request(price_url, image_name, item_rm['projectId'])
                else:
                    # Plain-text cell (older layout).
                    text_str_li = td.xpath("./a[2]/text()").extract()
                    if text_str_li:
                        item_rm['roomArea'], item_rm['unitPrice'] = self.get_area_and_price_v2(text_str_li)
                    else:
                        item_rm['roomArea'] = None
                        item_rm['unitPrice'] = None
                yield item_rm
        else:
            logger.error(
                '{}  {}-{}-{}-{} 房号列表获取为空'.format(response.request.url, item_bd['districtName'], item_bd['projectName'],
                                                  item_bd['certDate'], item_bd['blockName']))

    def parse_images(self, response):
        """Persist a downloaded area/price image to
        ``./FDC_spider/images/nanjing/room/<projectId>/<image_name>.png``.

        :param response: binary image response
        """
        image_name = response.meta['image_name']
        file_name = response.meta['file_name']
        file_base_path = './FDC_spider/images/nanjing/room/' + file_name
        self.make_dirs_v2(file_base_path)
        file_path = file_base_path + '/' + image_name + '.png'
        with open(file_path, 'wb') as f:
            f.write(response.body)

    # Deprecated since the 2019/12/03 anti-scraping upgrade.
    def parse_room_li(self, response):
        """Parse the (pre-v2) room grid and request each room's detail page.

        :param response: room grid page of one building
        """
        item_bd = copy(response.meta['item_bd'])
        # Rows of the room grid, one per floor.
        tr_li = response.xpath("//table[@class='ck_table']//tr[td]")
        if tr_li:
            item_rm = FdcRoomItem()
            item_rm['projectName'] = item_bd['projectName']
            item_rm['projectId'] = item_bd['projectId']
            item_rm['blockName'] = item_bd['blockName']
            item_rm['buildingId'] = item_bd['buildingId']
            for tr in tr_li:
                # Room cells of the current floor (non-empty links only).
                td_li = tr.xpath("./td/a[text()!='']/..")
                for td in td_li:
                    item_rm['roomUrl'] = td.xpath("./a[1]/@href").extract_first()
                    yield response.follow(
                        item_rm['roomUrl'],
                        callback=self.parse_room_detail,
                        meta=dict(item_rm=deepcopy(item_rm)),
                    )
        else:
            logger.error(
                '{}  {}-{}-{}-{} 楼层获取为空'.format(response.request.url, item_bd['districtName'], item_bd['projectName'],
                                                item_bd['certDate'], item_bd['blockName']))

    # Deprecated since the 2019/12/03 anti-scraping upgrade.
    def parse_room_detail(self, response):
        """Parse one (pre-v2) room detail page into an FdcRoomItem.

        :param response: room detail page
        """
        item_rm = copy(response.meta['item_rm'])
        item_rm['roomUrl'] = response.request.url
        item_rm['roomLocation'] = response.xpath("//em/text()").extract_first()
        item_rm['roomFloor'] = response.xpath("//td[text()='楼层']/following-sibling::td[1]/text()").extract_first()
        item_rm['roomFloor'] = self.get_floor_num(item_rm['roomFloor'])
        item_rm['roomNo'] = response.xpath("//td[text()='房号']/following-sibling::td[1]/text()").extract_first()
        item_rm['forecastBuildArea'] = response.xpath(
            "//td[text()='预测建筑面积']/following-sibling::td[1]/text()").extract_first()
        item_rm['forecastBuildArea'] = self.get_digit_str(item_rm['forecastBuildArea'])
        item_rm['forecastInnerArea'] = response.xpath(
            "//td[text()='预测套内面积']/following-sibling::td[1]/text()").extract_first()
        item_rm['forecastInnerArea'] = self.get_digit_str(item_rm['forecastInnerArea'])
        item_rm['forecastSharedArea'] = response.xpath(
            "//td[text()='预测分摊面积']/following-sibling::td[1]/text()").extract_first()
        item_rm['forecastSharedArea'] = self.get_digit_str(item_rm['forecastSharedArea'])
        item_rm['roomUse'] = response.xpath("//td[text()='房屋类型']/following-sibling::td[1]/text()").extract_first()
        item_rm['saleStatus'] = response.xpath("//td[text()='销售状态']/following-sibling::td[1]/text()").extract_first()
        item_rm['unitPrice'] = response.xpath("//td[text()='参考价格']/following-sibling::td[1]/text()").extract_first()
        item_rm['unitPrice'] = self.get_digit_str(item_rm['unitPrice'])
        yield item_rm

    def process_links(self, links):
        """Normalize project-list pagination links to the canonical template
        url; links without a ``page`` parameter are dropped.

        :param links: links extracted by the pagination Rule
        :return: yields rewritten links
        """
        for link in links:
            page_num_li = re.findall(r'page=(\d+)', link.url)
            if page_num_li:
                link.url = self.prt_li_temp_url.format(page_num_li[0])
                yield link

    @staticmethod
    def get_digit_str(_str):
        """Extract a numeric string from *_str*.

        A single digit run is returned as-is; when several digit runs exist,
        the greedy span from the first digit to the last is returned, which
        keeps decimals ('123.45') and dash-separated phone numbers
        ('025-1234') intact.  NOTE(review): the dot in the second pattern is
        deliberately left unescaped for that reason, but it also captures any
        text between two numbers -- confirm that is acceptable for the count
        fields.  Returns None for falsy input or input without digits.
        """
        regex_1 = re.compile(r'\d+')
        regex_2 = re.compile(r'\d+.*\d+')
        if _str:
            ret_1 = regex_1.findall(_str)
            ret_2 = regex_2.findall(_str)
            if len(ret_1) > 1:
                # >=2 digit runs guarantee regex_2 matched, so ret_2 is non-empty.
                return ret_2[0]
            elif len(ret_1) == 1:
                return ret_1[0]

    @staticmethod
    def get_building_id(_str):
        """Extract the ``buildid`` query value from a building url, or None."""
        regex = re.compile(r'buildid=(\d+)')
        if _str:
            ret = regex.findall(_str)
            return ret[0] if ret else None

    # Deprecated since the 2019/12/03 anti-scraping upgrade.
    @staticmethod
    def get_floor_num(_str):
        """Extract a (possibly negative) floor number from the floor cell
        text; falls back to returning *_str* unchanged.
        """
        regex = re.compile(r'-\d+|\d+')
        if _str:
            ret = regex.findall(_str)
            return ret[0] if ret else _str

    @staticmethod
    def get_full_url_v2(_base, _url):
        """Resolve *_url* against *_base*; None stays None."""
        return urllib.parse.urljoin(_base, _url) if _url else None

    @staticmethod
    def get_floor_num_v2(_str):
        """Extract the ``layerno1`` query value from a room url.

        Fix: the old pattern r'layerno1=(.*)&' matched greedily up to the
        LAST '&', swallowing any later query parameters, and matched nothing
        when layerno1 was the final parameter.  Falls back to returning
        *_str* unchanged when the parameter is absent, as before.
        """
        if _str:
            ret = re.findall(r'layerno1=([^&]*)', _str)
            return ret[0] if ret else _str

    @staticmethod
    def get_room_status_v2(_str):
        """Map a room cell's css class to a sale status, or None if unknown."""
        return NJ_RM_STATUS_DICT.get(_str, None)

    @staticmethod
    def get_area_and_price_v2(_li):
        """Parse '面积：x' / '价格：y' values out of the text fragments in *_li*.

        :param _li: list of text fragments from the room cell
        :return: (area, price) strings; each is None when not found
        """
        regex_area = re.compile(r'面积：(\d+\.\d+|\d+)')
        regex_price = re.compile(r'价格：(\d+\.\d+|\d+)')
        if _li:
            _str = ','.join(_li)
            area_li = regex_area.findall(_str)
            price_li = regex_price.findall(_str)
            area = area_li[0] if area_li else None
            price = price_li[0] if price_li else None
            return area, price
        return None, None

    @staticmethod
    def make_dirs_v2(_file):
        """Create directory *_file* (including parents); no-op if it exists.

        Fix: ``exist_ok=True`` replaces the check-then-create race of the
        original ``os.path.exists`` guard.
        """
        os.makedirs(_file, exist_ok=True)

    @staticmethod
    def deal_proxy_ip_v2(_proxy, code1, code2):  # currently unused
        """Build an authenticated proxy url from *_proxy*.

        Fix: the original scheme test (``'http' in _proxy``) was also true for
        'https://...' urls, making the https branch unreachable; use a prefix
        check instead.  NOTE(review): the credential part formats *code1*
        twice ("user=code1, password=code1+code2") -- that looks like a typo
        (probably meant code1/code2); confirm against the proxy provider
        before putting this helper to use.  Raises IndexError if *_proxy*
        contains no '://'.
        """
        regex = re.compile(r'://(.*)')
        proxy_ip = regex.findall(_proxy)[0]
        proxy_dict = {
            "http": "http://{}:{}{}@{}".format(code1, code1, code2, proxy_ip),
            "https": "https://{}:{}{}@{}".format(code1, code1, code2, proxy_ip)
        }
        return proxy_dict['https'] if _proxy.startswith('https') else proxy_dict['http']