# -*- coding: utf-8 -*-
import sys

reload(sys)
sys.setdefaultencoding('utf8')
import scrapy
import re
import json
from scrapy import Request, FormRequest, Spider
from bs4 import BeautifulSoup
from urlparse import urljoin
from realEstateCommittee.items import RealestatecommitteeItem


class TedSpider(Spider):
    """Spider for presale housing data on the Nanchong (南充) real-estate
    registration site (www.ncfdj.gov.cn).

    Crawl pipeline:
        search list page (``parse``)
        -> project detail page (``parse_detail``)
        -> building list JSON (``parse_building``)
        -> per-unit house resource JSON (``parse_house_resource``)
        -> one item per house via ``meta_to_item``.

    Scraped values travel between callbacks in ``request.meta`` under the
    string keys declared below.
    """

    name = "black_widow"

    CITY = "南充"
    start_urls = ["http://www.ncfdj.gov.cn/projectQuery/search.jspx"]

    REGI_ID = "regi_id"  # project registration id used by the JSON endpoints
    buildings_url = "http://www.ncfdj.gov.cn/projectQuery/getBuildings.jspx"
    house_resource_url = "http://www.ncfdj.gov.cn/projectQuery/getHouResource.jspx"

    # meta keys for the scraped fields
    PRE_NO = "pre_no"  # number of buildings approved for presale
    BELONG_CITY = "belong_city"  # district / county
    ALIAS = "alias"  # marketing name / alias
    PROJECT_ADDR = "project_addr"  # project address
    PRESALE_PERMIT = "presale_permit"  # presale permit number
    BUILD_NO = "build_no"  # residential building number (this phase)
    RESIDENCE_SET = "residence_set"  # total residential units (this phase)
    RESIDENCE_AREA = "residence_area"  # total residential area (this phase)
    PROPERTY_TYPE = "property_type"  # property / house type
    BUILDING_TYPE = "building_type"  # construction type
    OPEN_TIME = "open_time"  # start/opening date (detail page cell label: 开工日期)
    COMPLETE_TIME = "complete_time"  # completion date
    DEVELOPER = "developer"  # developer name
    BUILDING_NO = "building_no"  # building number
    UNIT_NO = "unit_no"  # unit number
    FLOOR = "floor"  # floor
    HOUSE_NO = "house_no"  # house number
    SALE_STATUS = "sale_status"  # sale status

    def make_requests_from_url(self, url):
        """Build a seed request with an empty meta dict.

        ``dont_filter`` lets the start URL be re-scheduled freely.
        """
        return Request(url, meta={}, dont_filter=True)

    def parse(self, response):
        """Parse one page of the project search list.

        Yields one detail-page request per project row and, while the
        "next page" link is enabled, a request for the following page.
        """
        bs_obj = BeautifulSoup(response.body, "html5lib")

        for row in bs_obj.find("table", class_="tab-list").find_all("tr")[1:]:
            cells = row.find_all("td")
            columns = [cell.get_text() for cell in cells[0:5]]

            # BUGFIX: use a per-row copy of meta. Requests are processed
            # asynchronously, so mutating one shared dict would let later
            # rows overwrite the values seen by earlier requests' callbacks.
            meta = dict(response.meta)
            meta[self.PRESALE_PERMIT] = columns[0]
            meta[self.ALIAS] = columns[1]
            meta[self.PROJECT_ADDR] = columns[2]
            meta[self.DEVELOPER] = columns[3]
            meta[self.RESIDENCE_SET] = columns[4]

            self.logger.info(
                "预售证号码:%s;推广名称/别名:%s;项目地址%s;开发商名称:%s,本期住宅总套数:%s;",
                meta[self.PRESALE_PERMIT], meta[self.ALIAS],
                meta[self.PROJECT_ADDR], meta[self.DEVELOPER],
                meta[self.RESIDENCE_SET])

            # "view" link -> project detail page
            project_detail_url = urljoin(response.url, cells[5].a.attrs['href'])
            # BUGFIX: re.findall returns a list; keep the first match (the
            # registration id string) rather than the whole list so the
            # later form posts send a plain scalar value.
            regi_ids = re.findall("regiId=(.*?)&", cells[6].a.attrs['href'])
            meta[self.REGI_ID] = regi_ids[0] if regi_ids else ""

            yield Request(project_detail_url, callback=self.parse_detail,
                          meta=meta, dont_filter=True)

        # next-page anchor: must carry an onclick handler and not be disabled
        next_tag = bs_obj.find("a", attrs={'title': u"下一页",
                                           "onclick": True, "disabled": None})
        if next_tag:
            next_url = ("http://www.ncfdj.gov.cn/projectQuery/"
                        "search.jspx?page=%s"
                        % re.findall("'(.*?)'", next_tag.attrs["onclick"])[0])
            yield scrapy.Request(url=next_url, meta=dict(response.meta),
                                 dont_filter=True)

    def parse_detail(self, response):
        """Parse the project detail page, then request the building list."""
        bs_obj = BeautifulSoup(response.body, "html5lib")
        meta = response.meta
        content = bs_obj.find("table", class_="tab-list table-view") \
                        .find_all("td", class_=None)

        # Fixed cell positions in the site's detail table layout.
        meta[self.PRE_NO] = content[2].get_text()           # buildings in presale
        meta[self.BELONG_CITY] = content[5].get_text()      # district
        meta[self.OPEN_TIME] = content[8].get_text()        # start date
        meta[self.COMPLETE_TIME] = content[9].get_text()    # completion date
        meta[self.RESIDENCE_SET] = content[10].get_text()   # total units
        meta[self.PROPERTY_TYPE] = content[11].get_text()   # house type
        meta[self.BUILDING_TYPE] = content[12].get_text()   # building type
        meta[self.RESIDENCE_AREA] = content[16].get_text()  # floor area

        self.logger.info("预售幢数:%s,行政区:%s,开工日期:%s,竣工日期:%s,",
                         meta[self.PRE_NO], meta[self.BELONG_CITY],
                         meta[self.OPEN_TIME], meta[self.COMPLETE_TIME])
        self.logger.info("总套数:%s,房屋类型:%s,建筑类型:%s,建筑面积:%s,",
                         meta[self.RESIDENCE_SET], meta[self.PROPERTY_TYPE],
                         meta[self.BUILDING_TYPE], meta[self.RESIDENCE_AREA])

        yield FormRequest(url=self.buildings_url, callback=self.parse_building,
                          formdata={"regiId": meta[self.REGI_ID]},
                          meta=meta, dont_filter=True)

    def parse_building(self, response):
        """Walk the building/unit JSON and request each unit's house list."""
        json_body = json.loads(response.body)
        regi_id = str(response.meta[self.REGI_ID])

        for building in json_body['data']:
            for unit in building['unitList']:
                # unitId may be missing/falsy; post an empty string then.
                unit_id = unit['unitId'] or ""
                form_data = {
                    'id': regi_id,
                    'buildUuid': building['uuid'],
                    'unitId': unit_id,
                }
                # BUGFIX: fresh meta per request so building/unit numbers
                # are not overwritten by later loop iterations before the
                # callback runs.
                meta = dict(response.meta)
                meta[self.BUILDING_NO] = building["buildingName"]
                meta[self.UNIT_NO] = unit_id
                yield FormRequest(self.house_resource_url, formdata=form_data,
                                  callback=self.parse_house_resource,
                                  meta=meta, dont_filter=True)

    def parse_house_resource(self, response):
        """Emit one item per house in the unit's house-resource JSON.

        BUGFIX: the original yielded a single item after the loop, so only
        the last house's number/floor/status ever reached the pipeline.
        """
        meta = response.meta
        json_body = json.loads(response.body)
        for house in json_body["data"]:
            meta[self.HOUSE_NO] = house['number']
            meta[self.FLOOR] = house['floor']
            meta[self.SALE_STATUS] = house['status']
            self.logger.info("栋号：%s,单元号%s,房号:%s, 楼层:%s, 销售状态:%s",
                             meta[self.BUILDING_NO], meta[self.UNIT_NO],
                             meta[self.HOUSE_NO], meta[self.FLOOR],
                             meta[self.SALE_STATUS])
            yield self.meta_to_item(meta)

    def meta_to_item(self, meta):
        """Map collected meta values onto a RealestatecommitteeItem.

        Fields absent from ``meta`` default to the empty string.
        """
        item = RealestatecommitteeItem()
        for field in RealestatecommitteeItem.fields:
            item[field] = meta.get(field, "")
        return item
