#!/usr/bin/env python
# encoding=utf-8

import re
import sys
from copy import deepcopy
from datetime import datetime

from pymongo import MongoClient
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.codehelper import trytry
from xtls.mqwrapper import consumer
from xtls.timeparser import now
from xtls.util import BeautifulSoup

# Python 2 hack: force the process-wide default string encoding to UTF-8 so
# the many unicode/str mixes below don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

__author__ = 'xlzd'
logger = get_logger(__file__)
# NOTE(review): hard-coded internal MongoDB host — presumably only reachable
# from the crawl cluster; confirm before running elsewhere.
MONGO = MongoClient('10.132.23.104', 27017)
# Company landing page; {cid} is the 51job numeric company id.
COMPANY_URL = 'http://jobs.51job.com/all/co{cid}.html'
# Education levels recognised in a job's meta line (enrolled student,
# technical secondary, high school, junior college (two spellings),
# bachelor, postgraduate, master, doctor, post-doc).
DEGREE_KEYS = (u'在读学生', u'中专', u'高中', u'大专', u'专科', u'本科', u'研究生', u'硕士', u'博士', u'博士后')

# Template for one job document (stored in the 'recruitment' collection).
# "-" marks fields that parse_jobs() fills in.
JOB_FORMAT = {
    'logo': '',               # -
    'companyShortName': '',   # -
    'source': '51job',        # -
    '_id': '',                #
    'releaseTime': '',        #
    'keywords': '',           # -
    'companyId': '',          # -
    'jobNature': '',          # -
    'category': '',           # - e.g. tech / operations / ...
    # 'type': '',               # backend / frontend / ...
    # "sourceType": "",         # Java / Python / ...
    "sourceCity": "",         # -
    'degree': '',             # -  required education level
    'workingYears': '',       # -  required years of experience
    'salary': '',             # -  salary range
    'companyName': '',        # -
    'industry': '',           # -  industry
    'scale': '',              # -  company size
    'welfare': [],            # - benefits / perks
    'entity': '',             # -  company ownership type
    'description': '',        # -  job description
    'jobTitle': '',           # -
    'sourceUrl': '',          # -
    'position': '',           # -  position
    'address': '',            # - company address
    'phone': '',              # - contact phone
    'companyUrl': '',         # -  company website
    "requireNum": '',
    "updateTime": '',
}

# Template for one company document (stored in the 'job51Company'
# collection); filled by find_company_info().
COMPANY_FORMAT = {
    "_id": "",
    "srcUrl": "",
    "logo": "",
    "companyName": "",
    "entity": "",
    "companySize": "",
    "website": "",
    "industry": "",
    "desc": "",
    "address": "",
}
# Matches a company-size string like "50-99人" ("50-99 people").
# NOTE(review): compiled but not referenced anywhere in this file —
# confirm it is used elsewhere before removing.
PATTERN_SCALE = re.compile(ur'\d+\-\d+人')


class Job51Crawler(BaseCrawler):
    """Crawler for a single company on jobs.51job.com.

    Fetches the company profile page, persists the company document, then
    walks the paginated job list and persists every posting.  Parsing of a
    list page stops as soon as an already-stored job is encountered, which
    makes repeated runs incremental.
    """

    def __init__(self, company_id, need_detail=True):
        """
        :param company_id: 51job numeric company id; doubles as Mongo _id.
        :param need_detail: when True, fetch each job's detail page for the
                            full description (costs one extra request/job).
        """
        super(Job51Crawler, self).__init__(company_id=company_id, data=deepcopy(COMPANY_FORMAT))
        self.data['_id'] = company_id
        self.data['srcUrl'] = COMPANY_URL.format(cid=company_id)
        self.need_detail = need_detail

    def find_base_info(self, soup):
        """Extract logo, company name and the entity/size/industry triple
        from the company header fragment."""
        with trytry():  # logo is optional; swallow lookup failures
            self.data['logo'] = soup.find('img', class_='cimg').get('src', '')
        self.data['companyName'] = soup.find('h1')['title']

        # Meta line looks like "entity | size | industry"; only trust it
        # when all three parts are present.
        info = [item.strip() for item in soup.find('p', class_='ltype').getText().split(u'|')]
        if len(info) == 3:
            self.data['entity'] = info[0]
            self.data['companySize'] = info[1]
            self.data['industry'] = info[2]

    def find_company_info(self, soup):
        """Fill and persist the company document from the landing page.

        :return: total job count read from the hidden ``hidTotal`` form
                 field (as a string), or '' when the field is missing.
        """
        self.find_base_info(soup.find('div', class_='tHeader tHCop'))
        with trytry():
            self.data['desc'] = soup.find('div', class_='tBorderTop_box bt').getText().strip()
        with trytry():
            self.data['address'] = soup.find('p', class_='fp').getText().replace(u'公司地址：', '').strip()
        with trytry():
            # [5:] drops a fixed-length label that precedes the URL text.
            self.data['website'] = soup.find('div', class_='bmsg tmsg inbox').getText().strip()[5:]
        self.save(self.data)
        with trytry():  # a missing hidTotal input falls through to ''
            return soup.find('input', id='hidTotal')['value']
        return ''

    def save(self, data, coll='job51Company'):
        """Upsert ``data`` by _id and return the PREVIOUS document — None
        when the upsert inserted a new one.  Callers rely on that return
        value to detect already-crawled items."""
        data['updateTime'] = now()
        return MONGO['crawler_company_all'][coll].find_one_and_update(
            {'_id': data['_id']},
            {'$set': data},
            upsert=True
        )

    def find_job(self, url):
        """Fetch one job detail page and return extra fields for the job
        document (currently only the cleaned description).  Returns an
        empty dict when detail fetching is disabled."""
        data = {}
        if not self.need_detail:
            return data
        soup = BeautifulSoup(self.get(url))
        data['description'] = soup.find('div', class_='bmsg job_msg inbox').getText().strip()
        # Strip the trailing "report / share" widget text (exactly 5 chars).
        if data['description'].endswith(u'举报\n分享'):
            data['description'] = data['description'][:-5].strip()
        return data

    def parse_jobs(self, soup):
        """Parse one job-list fragment and store every job found.

        :return: True when a stored-already job was hit (everything after
                 it is older, so the caller can stop paginating), else False.
        """
        month = datetime.now().month
        year = datetime.now().year
        for item in soup.find_all('div', class_='el'):
            job = deepcopy(JOB_FORMAT)
            # Release date comes as "MM-DD"; a month greater than the
            # current one must belong to last year.
            t = item.find('span', class_='t5').getText().strip()
            job['releaseTime'] = ('%s-' % (year, year - 1)[int(t.split('-')[0]) > month]) + t + ' 00:00:00'
            job['salary'] = item.find('span', class_='t4').getText().strip()
            job['sourceCity'] = item.find('span', class_='t3').getText().strip()
            # The t2 column mixes experience / headcount / degree parts
            # separated by '|'; classify each part by its shape.
            for temp in item.find('span', class_='t2').getText().split('|'):
                text = temp.strip()
                if u'年' in text:
                    job['workingYears'] = text
                elif text.startswith(u'招聘') and text.endswith(u'人'):
                    job['requireNum'] = text[2:-1]
                elif text in DEGREE_KEYS:
                    job['degree'] = text
                else:
                    logger.info('unhandled %s.' % text)
            a = item.find('p', class_='t1').find('a')
            job['_id'] = a['href']
            job['sourceUrl'] = a['href']
            job['jobTitle'] = a['title']

            # Denormalize the company profile into every job document.
            job['companyUrl'] = self.data['website']
            job['companyName'] = self.data['companyName']
            job['logo'] = self.data['logo']
            job['address'] = self.data['address']
            job['entity'] = self.data['entity']
            job['industry'] = self.data['industry']
            job['scale'] = self.data['companySize']

            job.update(self.find_job(job['sourceUrl']))
            logger.info('now item:  %s' % job['_id'])

            # save() returns the previous document: truthy means this job
            # was already stored, so stop here.
            if self.save(job, coll='recruitment'):
                return True
        return False

    def run(self):
        """Crawl the company profile and all of its job-list pages.

        :return: HTTP-style status — 404 when the company page has been
                 removed, 204 when it lists no jobs, 200 otherwise.
        """
        html = self.get(self.data['srcUrl'])
        soup = BeautifulSoup(html)

        if soup.find('div', class_='qxjyxszw'):  # "page removed" marker
            return 404

        total = self.find_company_info(soup)
        if not total:
            return 204
        saved = self.parse_jobs(soup.find('div', id='joblistdata'))
        if saved:
            return 200
        # 20 jobs per page, so the last page number is ceil(total / 20).
        # BUGFIX: the loop previously ran xrange(2, (19 + total) / 20),
        # whose exclusive upper bound always skipped the final page (e.g.
        # total=40 produced the empty range xrange(2, 2)); the bound now
        # includes it.
        last_page = (int(total) + 19) // 20
        for page in xrange(2, last_page + 1):
            saved = self.parse_jobs(BeautifulSoup(self.post(self.data['srcUrl'], {'pageno': page, 'hidTotal': total})))
            if saved:
                return 200
        return 200


@consumer('10.117.29.191', '51job-company-id', logger=logger)
def task(param):
    """Queue worker: crawl one company and record the outcome.

    :param param: message dict carrying the 'companyId' to crawl.
    :return: (status, '') tuple for the MQ wrapper; status is 500 when the
             crawler raised, otherwise the code returned by run().
    """
    company_id = param['companyId']
    status = 500  # default if the crawl raises inside trytry()
    with trytry(logger=logger):
        status = Job51Crawler(company_id).run()
    # Upsert a per-company crawl-result record for monitoring.
    log_doc = {'_id': company_id, 'result': status}
    MONGO['crawler_log']['job51company'].update_one(
        {'_id': company_id},
        {'$set': log_doc},
        True
    )
    return status, ''


def main():
    """
    JUST 4 TEST !!!
    """
    for cid in xrange(455, 1000000):
        print 'cid :', cid
        with trytry():
            print Job51Crawler(cid).run()


if __name__ == '__main__':
    # Start the message-queue consumer loop (see @consumer on task).
    task()
