# -*- coding: utf-8 -*-
import scrapy

import logging
import urllib.parse
from pprint import pprint
from copy import deepcopy, copy
from FDC_spider.items import FdcEstateGuidItem, FdcBuildingItem, FdcRoomItem

logger = logging.getLogger(__name__)


class KunmingSpider(scrapy.Spider):
    """Spider for new-home listings on kmhouse.org (Kunming).

    Crawl flow:
        project list page  ->  pre-sale permit lookup (POST)
                           ->  project page with building <select>
                           ->  building room list (POST, paginated)

    Yields ``FdcEstateGuidItem`` (one per project), ``FdcBuildingItem``
    (one per building) and ``FdcRoomItem`` (room rows).
    """

    name = 'kunming'
    allowed_domains = ['kmhouse.org']
    start_urls = ['http://www.kmhouse.org/moreHousePriceList.asp?page=1&']
    # POST endpoint used to look up a project's pre-sale permit details.
    pre_sale_permit_url = 'http://www.kmhouse.org/lqt/selllicensedisp.asp?&from=androidqq'

    # Field spec for the permit detail table in parse_project_detail:
    # (item key, <tr> index, <td> index, Chinese label used in warning logs).
    # Labels are chosen so the emitted log messages are identical to the
    # historical per-field messages.
    _DETAIL_FIELDS = (
        ('estateAddress', 2, 1, '项目地址'),
        ('developerName', 2, 2, '开发商信息'),
        ('CA', 3, 1, '发证机构信息'),
        ('preSalePermit', 3, 2, '预售许可证'),
        ('certDate', 4, 1, '发证日期'),
        ('preSaleArea', 4, 2, '预售面积'),
    )

    def start_requests(self):
        """Kick off the crawl at the first project-list page."""
        yield scrapy.Request(
            KunmingSpider.start_urls[0],
            callback=self.parse_project,
        )

    def parse_project(self, response):
        """Parse one page of the project list.

        For every project row: emit a POST request for its pre-sale permit
        details (carrying a partially-filled ``FdcEstateGuidItem`` in meta),
        then follow the "next page" link if present.
        """
        table_li = response.xpath("//table[@class='kuanyou']/following-sibling::table")
        for table in table_li:
            project_name = table.xpath(".//a/text()").extract_first()
            estate_url = table.xpath(".//a/@href").extract_first()
            if not project_name or not estate_url:
                # Rows without a project anchor used to crash on
                # None.encode(...) / urljoin(None); skip them instead.
                logger.warning('项目列表行缺少名称或链接, 跳过: %s', response.url)
                continue

            item_eg = FdcEstateGuidItem()
            item_eg['projectName'] = project_name
            item_eg['estateUrl'] = urllib.parse.urljoin(response.request.url, estate_url)

            # Build the permit-search form; the site expects GB-encoded
            # Chinese text, hence the explicit GB18030 encode.
            data = dict(
                theName=project_name.encode('GB18030'),
                theAddress='',
                Submit='提交',
                thePage=str(1),
            )
            yield scrapy.FormRequest(
                KunmingSpider.pre_sale_permit_url,
                formdata=data,
                callback=self.parse_project_detail,
                # deepcopy: keep each request's item independent of the loop.
                meta={'item_eg': deepcopy(item_eg)},
            )

        # Pagination of the project list.
        next_url = response.xpath("//a[text()='下一页']//@href").extract_first()
        if next_url:
            yield response.follow(
                next_url,
                callback=self.parse_project,
            )

    def parse_project_detail(self, response):
        """Fill the estate item from the pre-sale permit search result.

        Yields the completed ``FdcEstateGuidItem`` and then a request for
        the project's own page (building list).
        """
        item_eg = copy(response.meta['item_eg'])
        # Tables whose first cell names this exact project.
        table_li = response.xpath(
            "//td[text()='项目名称：{}']/../..".format(item_eg['projectName']))
        if len(table_li) > 0:
            # NOTE(review): when several permit tables match, every iteration
            # overwrites the same item, so only the LAST table's values are
            # yielded. Preserved from the original — confirm this is intended.
            for table in table_li:
                for key, row, col, label in self._DETAIL_FIELDS:
                    try:
                        raw = table.xpath(
                            "./tr[{}]/td[{}]/text()".format(row, col)).extract_first()
                        # Cells look like "标签：value"; keep the value part.
                        item_eg[key] = raw.split('：')[1].strip()
                    except Exception as e:
                        logger.warning('{}获取{}失败:{}'.format(
                            item_eg['projectName'], label, e))
                        item_eg[key] = None
        else:
            logger.info('{}无法获取项目详细信息'.format(item_eg['projectName']))
            for key, _row, _col, _label in self._DETAIL_FIELDS:
                item_eg[key] = None
        yield item_eg

        yield scrapy.Request(
            item_eg['estateUrl'],
            callback=self.parse_project_bd_li,
            meta={'item_eg': deepcopy(item_eg)},
        )

    def parse_project_bd_li(self, response):
        """Parse the building list (<select id='bid'>) on a project page.

        Yields one ``FdcBuildingItem`` per building and a POST request for
        that building's room list.
        """
        item_eg = copy(response.meta['item_eg'])
        # First <option> is the "please choose" placeholder — drop it.
        option_li = response.xpath("//select[@id='bid']/option")[1:]
        if not option_li:
            logger.info('{}无法获取楼栋列表信息'.format(item_eg['projectName']))
            return

        # Hidden form fields are page-level constants; read them once
        # instead of re-querying on every loop iteration.
        mess = response.xpath(".//input[@name='mess']/@value").extract_first()
        aid = response.xpath(".//input[@name='aid']/@value").extract_first()
        preid = response.xpath(".//input[@name='preid']/@value").extract_first()
        prename = response.xpath(".//input[@name='prename']/@value").extract_first()

        for option in option_li:
            # Fresh item per building: the old code reused one item and
            # mutated it after yielding, corrupting items already handed
            # to the (possibly asynchronous) pipeline.
            item_bd = FdcBuildingItem()
            item_bd['projectName'] = item_eg['projectName']
            item_bd['developerName'] = item_eg['developerName']
            item_bd['blockName'] = option.xpath("./text()").extract_first()
            item_bd['buildingId'] = option.xpath("./@value").extract_first()
            yield item_bd

            data = dict(
                mess=mess,
                aid=aid,
                preid=preid,
                prename=prename,
                issearch='yes',
                bid=item_bd['buildingId'],
            )
            yield scrapy.FormRequest(
                response.request.url,
                formdata=data,
                callback=self.parse_project_rm_li,
                meta={'item_bd': deepcopy(item_bd)},
            )

    def parse_project_rm_li(self, response):
        """Parse the room-number table for one building, following pagination."""
        item_bd = copy(response.meta['item_bd'])
        # Rows following the header row whose first cell is '房号'.
        tr_li = response.xpath("//th[text()='房号']/../following-sibling::tr")
        if len(tr_li) > 0:
            # NOTE(review): only the FIRST row is processed ([0:1]) — looks
            # like a deliberate sample (room detail URLs 404, see TODO below),
            # preserved as-is; confirm before widening to all rows.
            for tr in tr_li[0:1]:
                item_rm = FdcRoomItem()
                item_rm['projectName'] = item_bd['projectName']
                item_rm['developerName'] = item_bd['developerName']
                item_rm['blockName'] = item_bd['blockName']
                item_rm['unitNo'] = tr.xpath("./td[2]//text()").extract_first()
                # '无' means "none" on the site; normalise it to None.
                item_rm['unitNo'] = item_rm['unitNo'] if item_rm['unitNo'] != '无' else None
                item_rm['roomNo'] = tr.xpath("./td[3]//text()").extract_first()
                yield item_rm

                # TODO: room-detail URLs currently all return 404 on the
                # site, so per-room details cannot be crawled yet.

        # Pagination of the room list.
        next_url = response.xpath("//a[text()='下一页']/@href").extract_first()
        if next_url:
            yield response.follow(
                next_url,
                callback=self.parse_project_rm_li,
                meta={'item_bd': deepcopy(item_bd)},
            )
