# -*- coding: utf-8 -*-
from crawler.items import *
from scrapy_redis.spiders import RedisSpider
from crawler.core.service_owner import *
from crawler.core.dict_xpath import *
from crawler.core.dict_url_city import *
from crawler import settings
from lxml import etree
import scrapy, re, logging, json, pymysql, redis, time, sched

class RentDetailByOwner(RedisSpider):
    """Spider for 58.com rental detail pages published by owners (chuzu).

    Detail-page URLs are consumed from a redis list (``redis_key``); each
    entry has the form ``<detail_url><!><city_code>``.  Pages redirected to a
    captcha/verify page are retried via :meth:`rebuild_request`; after two
    failed retries the URL is parked in an overflow redis list.
    """

    name = "chuzu.owner.58.detail"
    custom_settings = settings.MIDDLEWARES_RENT_SECOND
    # Detail links are read from this redis key (scrapy-redis start URLs).
    redis_key = settings.REDIS_DETAILLINK_CZOWNER

    def __init__(self, *args, **kwargs):
        super(RentDetailByOwner, self).__init__(*args, **kwargs)
        # Service helper used for redis persistence and font de-obfuscation.
        self.rent_service = RentOwnerService()

    def make_requests_from_url(self, url):
        """Build the initial Request from a redis entry.

        ``url`` is ``<detail_url><!><city_code>``; the city code travels in
        the request meta so :meth:`parse` can attach it to the scraped item.
        """
        # Split once instead of twice; semantics identical to the original
        # ([0] / [-1] of the '<!>' split).
        parts = url.split('<!>')
        url, city_code = parts[0], parts[-1]
        meta = {
            # NOTE(review): key is 'DOWNLOAD_TIME' in settings — looks like it
            # may be meant to be 'DOWNLOAD_TIMEOUT'; confirm against settings.
            'download_timeout': settings.MIDDLEWARES_RENT_SECOND.get('DOWNLOAD_TIME', 3),
            'first_call': url,
            'city_code': city_code,
            'referer': url.split('chuzu')[0] + 'chuzu/0/'
        }
        return scrapy.Request(url, dont_filter=True, meta=meta)

    def rebuild_request(self, response, call_back):
        """Re-issue a request that was redirected to a verify/captcha page.

        Returns a new ``scrapy.Request``, or ``False`` once the URL has
        already been retried more than once (it is then pushed to the
        overflow redis list so it is not lost).
        """
        if response.meta.get('verifyTimes', 0) > 1:
            # Too many failed attempts: park the URL (with its city code) in
            # the overflow redis list for later inspection / re-crawl.
            url = '%s<!>%s' % (response.meta.get('first_call'), response.meta.get('city_code', ''))
            self.rent_service.insert_info_redis(url, table=settings.REDIS_DETAILLINK_CZOWNER_OUT)
            # logging.warning, not logging.exception: there is no active
            # exception here, so exception() would log a bogus traceback.
            logging.warning('\n*** ***\n GIVE UP URL \n {0}\nn*** ***'.format(url))
            return False
        # Re-use the same proxy (ip:port) for the retry.  Guard the regex:
        # the original .group() call raised AttributeError when the meta
        # carried no proxy or one in an unexpected format.
        proxy_match = re.search(r'\d+\.\d+\.\d+\.\d+:\d+', response.meta.get('proxy', ''))
        meta = {
            'download_timeout': response.meta.get('download_timeout', 3),
            'first_call': response.meta.get('first_call'),
            'referer': response.meta.get('referer', ''),
            'city_code': response.meta.get('city_code', ''),
            'verifyTimes': response.meta.get('verifyTimes', 0) + 1
        }
        if proxy_match:
            meta['proxy'] = proxy_match.group()
        return scrapy.Request(response.meta.get('first_call'), meta=meta, dont_filter=True, callback=call_back)

    def parse(self, response):
        """Parse a 58.com owner rental detail page into a ``RentOfOwner`` item.

        Captcha/verify redirects are retried via :meth:`rebuild_request`;
        parsing errors are logged and swallowed so the spider keeps draining
        the redis queue.
        """
        if 'verifycode' in response.url or 'callback' in response.url:
            # Redirected to the verification page: retry (bounded by
            # verifyTimes inside rebuild_request).
            new_request = self.rebuild_request(response, self.parse)
            if new_request:
                # Bug fix: yield the request built above instead of calling
                # rebuild_request a second time and yielding that duplicate.
                yield new_request
        else:
            # Pre-bind so the except handler below can always log it, even
            # when the failure happens before the id is extracted.
            house_id = ''
            try:
                houseinfo = RentOfOwner()
                house_id = re.search(r'/(\d+)x\.shtml', response.url).group(1)
                base_url = re.search(r'\w+\.58\.com', response.url).group()
                # replace_font undoes 58.com's custom-font obfuscation.
                response_ = self.rent_service.replace_font(response)
                title = response_.xpath(RENT_OWNER_58['detail_title'])[0]
                address_bar = response_.xpath(RENT_OWNER_58['address_bar'])
                pay_type = response_.xpath(RENT_OWNER_58['pay_type'])
                unit_price = response_.xpath(RENT_OWNER_58['unit_price_detail'])[0]
                images = response_.xpath(RENT_OWNER_58['images'])
                general_item_wrap = response_.xpath(RENT_OWNER_58['detail_baseinfo'])
                publisher_name = response_.xpath(RENT_OWNER_58['publisher_name'])[0]

                houseinfo['city_code'] = response.meta.get('city_code')
                houseinfo['house_id'] = house_id
                houseinfo['web_belong'], houseinfo['unit_price'] = RENT_OWNER_58['web'], unit_price
                houseinfo['title'], houseinfo['publisher_phone'] = title, ''
                # Prefecture-level city: its breadcrumb has one fewer level,
                # so the district is the city itself.
                pl_city = PLCITY_TO_URL.get(base_url, '')
                if pl_city and pl_city in address_bar[1]:
                    houseinfo['district_belong'] = pl_city
                else:
                    houseinfo['district_belong'] = address_bar[2].replace('合租房', '').replace('租房', '').replace(' ', '')
                # Both branches derived the business area identically, so it
                # is computed once here.
                houseinfo['business_belong'] = address_bar[-1].replace('合租房', '').replace('租房', '').replace(' ', '')
                # Identical district and business area means the page carries
                # no real business area.
                if houseinfo['district_belong'] == houseinfo['business_belong']:
                    houseinfo['business_belong'] = None
                # Bug fix: guard against the None set just above — the
                # original called .replace on None and crashed into except.
                if pl_city == '章丘' and houseinfo['business_belong'] and houseinfo['business_belong'] != '章丘城区':
                    houseinfo['business_belong'] = houseinfo['business_belong'].replace('城区', '')
                houseinfo['pay_type'] = pay_type[0] if pay_type else None
                houseinfo['publisher_name'] = publisher_name.replace(' ', '')
                # Normalize every picture URL to the 700x480 variant.
                houseinfo['picture'] = json.dumps(
                    [re.sub(r'w=\d+&h=\d+', 'w=700&h=480', p) for p in images])
                # Free-text sections: description and rental requirements.
                for li in response_.xpath('//ul[@class="introduce-item"]/li'):
                    label = li.xpath('span[1]/text()')[0]
                    if label == '房源描述':
                        text = li.xpath('span[2]/p/span/text()') + li.xpath('span[2]/p/text()') + li.xpath('span[2]/text()')
                        houseinfo['description'] = '<br>'.join(text).replace('\n', '')
                    if label == '出租要求':
                        houseinfo['remark1'] = ''.join(li.xpath('span[2]/em/text()'))
                # Labelled base-info rows (orientation/floor, address, ...).
                for g in general_item_wrap:
                    item_name = g.xpath('span[1]/text()')[0]
                    values = g.xpath('span[2]/text()')
                    item_value = values[0] if values else ''
                    if not item_value:
                        continue
                    if item_name == '朝向楼层：':
                        # Value looks like "<orientation>\xa0<floor>".
                        houseinfo['orientation'], houseinfo['floor'] = item_value.split('\xa0')[0], item_value.split('\xa0')[-1]
                    if item_name == '详细地址：':
                        houseinfo['address'] = item_value.replace(' ', '').replace('\n', '')
                    if item_name == '所在小区：':
                        houseinfo['community_name'] = g.xpath('span[2]/a/text()')[0]
                    if item_name == '租赁方式：':
                        houseinfo['lease_type'] = '整租' if '整租' in item_value else '合租' if '合租' in item_value else ''
                    if item_name == '房屋类型：':
                        # "N室N厅N卫" or "N室N厅"; the alternation fills
                        # either groups 1-3 or groups 4-5.
                        for m in re.findall(r'(\d+)室(\d+)厅(\d+)卫|(\d+)室(\d+)厅', item_value):
                            houseinfo['room'] = m[0] if m[0] else m[3]
                            houseinfo['hall'] = m[1] if m[1] else m[4]
                            houseinfo['toilet'] = m[2] if m[2] else ''
                        decoration = re.search(r'.*?([\u4E00-\u9FA5]+装修)', item_value)
                        if decoration:
                            houseinfo['decoration'] = decoration.group(1)
                if not houseinfo['publisher_name'] or not houseinfo['unit_price'] or not houseinfo['title'] or not houseinfo['lease_type']:
                    # warning, not exception: no active exception here.
                    logging.warning('\n*** ***\n Info Not Complete\n*** ***')
                yield houseinfo
            except Exception as e:
                # house_id is pre-bound to '' above, so this line can no
                # longer raise NameError for early failures.
                logging.exception('\n*** ***\n chuzu 58 owner \n {0}\n Exception {1}\n*** ***'.format(house_id, str(e)))