#!/usr/bin/env python
# encoding=utf-8

"""
脚本 `offcie_crawler.py` 需要定期执行（目前在我的电脑上按照每小时一次执行，目前在我的电脑上已有991279数据，位于 `mongo -> crawler_office`）。
每天执行一次 `calc_building_price.py`，用于计算办公楼的平均房价。
"""

import re
import sys
import time

from pymongo import MongoClient
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.timeparser import now, parse_time
from xtls.util import BeautifulSoup, sha1

from config import *

reload(sys)
sys.setdefaultencoding('utf-8')

__author__ = 'xlzd'
logger = get_logger(__file__)
MONGO = MongoClient(MONGO_HOST, MONGO_PORT)
PATTERN_IMG = re.compile(ur'(http[s]?://img.+?)\'')
PATTERN_INFO = re.compile(ur'等级：(.+?) [\s\S]+?/所在楼层：(.+?)\(共(\d+)层\)')
PATTERN_NUMBER = re.compile(ur'\d+')


class OffcieCrawler(BaseCrawler):
    """Crawler for the office-rental listings of one district.

    Walks the district's paginated listing pages, parses each listing into a
    flat dict and upserts it into ``mongo -> <DB_NAME>.officeItems``.

    NOTE(review): the class name keeps the original "Offcie" spelling so that
    external callers are not broken.
    """

    def __init__(self, district):
        """district: a document from the ``officeDistrict`` collection with at
        least ``_id`` and ``url`` fields."""
        # base_url is the prefix of the district url up to (but excluding) the
        # '/zu' path segment, used to absolutize relative hrefs later on.
        super(OffcieCrawler, self).__init__(
            district=district,
            url=district['url'],
            base_url=district['url'][:district['url'].find('zu') - 1]
        )

    def parse_item(self, soup):
        """Parse one listing ``<dl>`` soup into a flat dict.

        Optional fields (image, grade/floor info, release time) fall back to
        empty defaults when missing; mandatory fields (title, acreage, price)
        raise if the site markup changes, as does an unrecognized rent unit.
        Callers are expected to treat any exception as a parse failure.
        """
        try:
            img_url = PATTERN_IMG.findall(soup.find('img')['onerror'])[0]
        except Exception:
            # No thumbnail / no onerror attribute -- not fatal.
            img_url = ''
        title_a = soup.find('p', class_='title').find('a')
        building_name = soup.find('p', class_='gray6 mt15').find('a')['title']
        address = soup.find('span', class_='iconAdress')['title'].strip()
        try:
            grade, loca, floor = PATTERN_INFO.findall(soup.find('p', class_='gray6 mt10').getText().replace('\n', ''))[0]
        except Exception:
            # Grade / floor details missing from this listing.
            grade, loca, floor = '', '', -1
        try:
            release_time = parse_time(soup.find('span', class_='ml10 gray9').getText())
        except Exception:
            release_time = ''

        # Floor area in square meters; the cell text is '建筑面积<number>'.
        acreage = int(soup.find('div', class_='area area2 alignR').getText().replace(u'建筑面积', '').strip())

        price_soup = soup.find('p', class_='mt5 alignR')
        # Normalize the unit price to yuan per square meter per MONTH.
        unit_price = float(price_soup.find('span', class_='price').getText().strip())
        unit = price_soup.find('span', class_='ml5').getText().strip()
        if unit == u'元/平米 ・ 天':
            # Quoted per day: approximate a month as 30 days.
            unit_price *= 30
        elif unit != u'元/平米 ・ 月':
            raise RuntimeError(u'房租单位超出预期，需要处理')

        # Total monthly rent in yuan.
        price = int(soup.find('p', class_='danjia alignR mt5 gray6').getText().split(u'元')[0].strip())

        return {
            'title': title_a['title'].strip(),
            'sourceUrl': self.base_url + title_a['href'],
            'image': img_url,
            'building': building_name,
            'address': address,
            'grade': grade.strip(),
            'totalFloor': floor,
            'officeFloor': loca,
            'releaseTime': release_time,
            'acreage': acreage,
            'unitPrice': unit_price,
            'price': price,
        }

    @classmethod
    def is_valuable(cls, soup):
        """Return False only when the page explicitly says "nothing found".

        ``soup`` is the "list-none mt10" div (or None when absent); an absent
        div means the page is a real listing page, hence valuable.
        """
        if not soup:
            return True
        return u'很抱歉，没有找到' not in soup.getText()

    def save(self, item):
        """Upsert one parsed listing into the ``officeItems`` collection.

        Returns True only when the item ALREADY existed: without
        ``return_document=ReturnDocument.AFTER``, ``find_one_and_update``
        returns the pre-update document, which is None on a fresh insert.
        ``run`` relies on this to detect already-crawled pages.
        """
        logger.info('save item [%s]' % item['sourceUrl'])
        item['_id'] = sha1(item['sourceUrl'])
        item['updateTime'] = now()
        item['officeDistrict'] = self.district['_id']
        item = MONGO[DB_NAME]['officeItems'].find_one_and_update(
            filter={'_id': item['_id']},
            update={'$set': item},
            upsert=True
        )
        return bool(item)

    def run(self):
        """Crawl this district's listing pages until exhausted.

        Stops early once 10 already-seen items (incremental crawl -- see
        ``save``) or 10 parse errors accumulate.
        """
        saved = error = 0
        while self.url:
            logger.info('now url : %s' % self.url)
            # ATT: decode manually because BeautifulSoup mis-parses the raw bytes.
            html = self.get(self.url).decode('gb2312', 'ignore')
            soup = BeautifulSoup(html, from_encoding='gb2312')
            if not self.is_valuable(soup.find('div', class_='list-none mt10')):
                # The site serves a "recommended listings" page when the
                # district has no real listings -- filter those out.
                break

            items_div = soup.find('div', class_='houseList')  # listing entries
            if not items_div:
                logger.info('no more data, exiting.')
                break
            for item in items_div.find_all('dl', class_='list rel'):
                try:
                    item_data = self.parse_item(item)
                except Exception:
                    # BUGFIX: parse_item raises on unexpected markup, which
                    # previously aborted the whole run; count it as a parse
                    # error instead (the intent of the commented-out
                    # @no_exception(on_exception=None) decorator).
                    item_data = None
                if not item_data:
                    error += 1
                    logger.info('parse error at %s, [%s]' % (item, error))
                    continue
                if self.save(item_data):
                    saved += 1

            if saved >= 10 or error >= 10:
                return
            next_page = soup.find('a', id='PageControl1_hlk_next')
            if not next_page:
                break
            self.url = self.base_url + next_page['href']
            time.sleep(0.5)


def main():
    """Run the crawler once for every district stored in mongo."""
    district_cursor = MONGO[DB_NAME]['officeDistrict'].find().batch_size(10)
    for district_doc in district_cursor:
        logger.info('now district item : %s' % district_doc['_id'])
        crawler = OffcieCrawler(district_doc)
        crawler.run()


# Script entry point: crawl every known district's listings once.
if __name__ == '__main__':
    main()
