# -*- coding: utf-8 -*-
from crawler.items import *
from scrapy_redis.spiders import RedisSpider
from crawler.items import *
from crawler.core.service_owner import *
from crawler.core.dict_xpath import *
from crawler.core.dict_url_city import *
from crawler import settings
import scrapy, re, logging, json, pymysql, redis, time, sched

class EsfDetailByOwner(RedisSpider):
    """Crawl owner-listed second-hand house detail pages from 58.com.

    Detail-page URLs are consumed from a redis list (``redis_key``); each
    redis entry is ``url<!>city_code``.  Pages redirected to a captcha /
    verification page are retried up to two times before the url is parked
    in an overflow redis list.

    @author Hongv
    """

    name = "ershoufang.owner.58.detail"
    # Redis list that feeds the detail-page URLs to crawl.
    redis_key = settings.REDIS_DETAILLINK_CSOWNER
    custom_settings = settings.MIDDLEWARES_OWNER_SECOND

    def __init__(self, *args, **kwargs):
        super(EsfDetailByOwner, self).__init__(*args, **kwargs)
        # Service object used for redis persistence and font de-obfuscation.
        self.esf_service = EsfOwnerService()

    def make_requests_from_url(self, url):
        """Build the initial request from a redis entry of ``url<!>city_code``."""
        url, city_code = url.split('<!>')[0], url.split('<!>')[-1]
        meta = {
            # NOTE(review): key 'DOWNLOAD_TIME' looks like a typo for
            # 'DOWNLOAD_TIMEOUT' -- confirm against the settings dict; if the
            # key is absent, the 3-second fallback is what actually applies.
            'download_timeout': settings.MIDDLEWARES_OWNER_SECOND.get('DOWNLOAD_TIME', 3),
            'first_call': url,
            'city_code': city_code,
            'referer': url.split('ershoufang')[0] + 'ershoufang/0/'
        }
        return scrapy.Request(url, dont_filter=True, meta=meta)

    def rebuild_request(self, response, call_back):
        """Rebuild a request that was redirected to a verification page.

        Returns a new ``scrapy.Request`` with ``verifyTimes`` incremented, or
        ``False`` after two failed retries (the url is then pushed to the
        overflow redis list for later processing).
        """
        # Too many failed verification retries: park the url in redis and give up.
        if response.meta.get('verifyTimes', 0) > 1:
            url = '%s<!>%s' % (response.meta.get('first_call'), response.meta.get('city_code', ''))
            self.esf_service.insert_info_redis(url, table=settings.REDIS_DETAILLINK_CSOWNER_OUT)
            # Fix: logging.exception outside an ``except`` block attaches a
            # bogus "NoneType: None" traceback -- log a plain warning instead.
            logging.warning('\n*** ***\n GIVE UP URL \n {0}\nn*** ***'.format(url))
            return False
        # Fix: re.search returns None when meta carries no usable proxy; the
        # original unguarded ``.group()`` raised AttributeError and aborted
        # the retry.  Only forward the proxy (bare ip:port) when one matched.
        proxy_match = re.search(r'\d+\.\d+\.\d+\.\d+:\d+', response.meta.get('proxy', ''))
        meta = {
            'download_timeout': response.meta.get('download_timeout', 3),
            'first_call': response.meta.get('first_call'),
            'referer': response.meta.get('referer', ''),
            'city_code': response.meta.get('city_code', ''),
            'verifyTimes': response.meta.get('verifyTimes', 0) + 1
        }
        if proxy_match:
            meta['proxy'] = proxy_match.group()
        return scrapy.Request(response.meta.get('first_call'), meta=meta, dont_filter=True, callback=call_back)

    def parse(self, response):
        """Parse one 58.com detail page into an ``EsfOfOwner`` item.

        Captcha redirects are retried via :meth:`rebuild_request`; any
        extraction failure is logged with the house id (when known) and the
        item is dropped.
        """
        if 'verifycode' in response.url or 'callback' in response.url:
            # Redirected to a verification page -- retry the original url.
            # Fix: the original called rebuild_request twice, building (and
            # discarding) a second identical request; reuse the first one.
            new_request = self.rebuild_request(response, self.parse)
            if new_request:
                yield new_request
            return
        # Fix: bind before the try so the except handler can always reference
        # it (previously an early failure raised UnboundLocalError in the
        # handler, masking the real exception).
        house_id = None
        try:
            houseinfo = EsfOfOwner()
            house_id = re.search(r'ershoufang/(\d+)x', response.url).group(1)
            base_url = re.search(r'\w+\.58\.com', response.url).group()
            title = response.xpath(ESF_OWNER_58['detail_title']).extract_first()
            wechat_link = response.xpath(ESF_OWNER_58['wechat_link']).extract_first()
            address_bar = response.xpath(ESF_OWNER_58['address_bar']).extract()
            decoration = response.xpath(ESF_OWNER_58['decoration']).extract_first()
            construction_date = response.xpath(ESF_OWNER_58['construction_date']).extract_first()
            address = response.xpath(ESF_OWNER_58['detail_address']).extract()
            publisher_phone = response.xpath(ESF_OWNER_58['publisher_phone']).extract_first()
            images_1 = response.xpath(ESF_OWNER_58['images_1']).extract()
            images_2 = response.xpath(ESF_OWNER_58['images_2']).extract()
            detail_baseinfo = response.xpath(ESF_OWNER_58['detail_baseinfo'])

            houseinfo['house_id'] = house_id
            houseinfo['web_belong'], houseinfo['city_code'] = ESF_OWNER_58['web'], response.meta.get('city_code')
            houseinfo['title'], houseinfo['wechat_link'], houseinfo['publisher_phone'] = title, wechat_link, publisher_phone
            # Fix: extract_first() may return None for these optional fields;
            # a missing decoration/build date should not drop the whole item.
            houseinfo['decoration'] = (decoration or '').replace(' ', '').replace('\n', '')
            houseinfo['construction_date'] = (construction_date or '').replace(' ', '').replace('\n', '')
            houseinfo['address'] = ''.join(address).replace(' ', '').replace('\n', '')
            # Prefectural-level city: some sub-domains map directly to a
            # district, in which case the breadcrumb starts with that city.
            pl_city = PLCITY_TO_URL.get(base_url, '')
            if pl_city and pl_city in address_bar[0]:
                houseinfo['district_belong'] = pl_city
                houseinfo['business_belong'] = address_bar[-1].replace('二手房', '').replace(' ', '')
            else:
                houseinfo['district_belong'] = address_bar[1].replace('二手房', '').replace(' ', '')
                houseinfo['business_belong'] = address_bar[-1].replace('二手房', '').replace(' ', '')
            # District and business area identical means no real business
            # area was present on the page.
            if houseinfo['district_belong'] == houseinfo['business_belong']:
                houseinfo['business_belong'] = None
            # Fix: guard against business_belong being None (set just above)
            # before calling .replace -- previously raised AttributeError.
            if pl_city == '章丘' and houseinfo['business_belong'] and houseinfo['business_belong'] != '章丘城区':
                houseinfo['business_belong'] = houseinfo['business_belong'].replace('城区', '')
            # Image gallery: rewrite thumbnail dimensions to full size.
            images = images_1 if len(images_1) > 0 else images_2
            picture = [re.sub(r'w=\d+&h=\d+', 'w=700&h=480', p) for p in images]
            houseinfo['picture'] = json.dumps(picture)
            # Prices are font-obfuscated on the page; decode before extracting.
            response_ = self.esf_service.replace_font(response)
            houseinfo['total_price'] = response_.xpath('//span[@class="price strongbox"]/text()')[0]
            houseinfo['unit_price'] = response_.xpath('//span[@class="unit strongbox"]/text()')[0]
            # Listing description: each section is "[tag]<br>text" when a tag
            # exists, otherwise just the text.
            desc_text = []
            description = response.xpath('//div[@class="genaral-pic-desc"]')
            for des in description:
                tag = des.xpath('span[@class="pic-desc-item"]/text()').extract_first()
                tag = '[%s]' % tag.replace('\n', '').replace(' ', '') if tag else ''
                # Fix: extract_first() may return None here too; fall back to ''.
                text = (des.xpath('p[@class="pic-desc-word"]/text()').extract_first() or '')
                text = text.replace('\n', '').replace(' ', '')
                desc_word = '%s<br>%s' % (tag, text) if tag else text
                desc_text.append(desc_word)
            houseinfo['description'] = '<br>'.join(desc_text)
            # Key/value base-info rows (tenure, building type, ownership).
            for g in detail_baseinfo:
                item_name = g.xpath('span[1]/text()').extract_first()
                item_value = g.xpath('span[2]/text()').extract_first()
                if not item_value:
                    continue
                if item_name == '产权年限':
                    houseinfo['property_year'] = item_value.replace(' ', '').replace('\n', '')
                if item_name == '房屋类型':
                    houseinfo['architecturaltype'] = item_value.replace(' ', '').replace('\n', '')
                if item_name == '交易权属':
                    houseinfo['property_type'] = item_value.replace(' ', '').replace('\n', '')
            # An item with no title, no contact channel, or no price is
            # incomplete -- flag it but still yield (matches original flow).
            if not houseinfo['title'] or (not houseinfo['publisher_phone'] and not houseinfo['wechat_link']) or not \
            houseinfo['total_price'] or not houseinfo['unit_price']:
                # Fix: not inside an except block, so use warning, not exception.
                logging.warning('\n*** ***\n Info Not Complete\n*** ***')
            yield houseinfo
        except Exception as e:
            logging.exception('\n*** ***\n ershoufang 58 owner \n {0}\n Exception {1}\n*** ***'.format(house_id, str(e)))