#!/usr/bin/env python
# encoding=utf-8

import json
import sys
from copy import deepcopy

from pymongo import MongoClient
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.mqwrapper import consumer
from xtls.timeparser import now
from xtls.util import BeautifulSoup

# Python 2 hack: re-expose setdefaultencoding (removed from sys by site.py)
# so implicit str<->unicode conversions use UTF-8 instead of ASCII.
reload(sys)
sys.setdefaultencoding('utf-8')

__author__ = 'xlzd'
logger = get_logger(__file__)
# Shared MongoDB client; used for both crawled data and the crawl log.
MONGO = MongoClient('10.132.23.104', 27017)

# Skeleton of one recruitment document: parse_item() deep-copies this and
# fills the fields in.  The dash marks in the comments flag fields that are
# known to get populated by this crawler.
BASE_DATA_FORMAT = {
    'logo': '',               # -
    'companyShortName': '',   # -
    'source': 'lagou',        # -
    '_id': '',                #
    'releaseTime': '',        #
    'keywords': '',           # -
    'companyId': '',          # -
    'jobNature': '',          # -
    'category': '',           # - e.g. tech / operations / ...
    # 'type': '',               # backend / frontend / ...
    # "sourceType": "",         # Java / Python / ...
    "sourceCity": "",         # -
    'degree': '',             # -  required education level
    'workingYears': '',       # -  required years of experience
    'salary': '',             # -  salary range
    'companyName': '',        # -
    'industry': '',           # -  industry sector
    'scale': '',              # -  company size
    'welfare': [],            # -  benefits / perks
    'entity': '',             # -  company nature (filled from financeStage in parse_item)
    'description': '',        # -  job description
    'jobTitle': '',           # -
    'sourceUrl': '',          # -
    'position': '',           # -  position title
    'address': '',            # -  company address
    'phone': '',              # -  contact phone
    'companyUrl': '',         # -  company website
    "requireNum": '',
    "updateTime": '',
}
INDEX_URL = 'http://www.lagou.com/'
# AJAX endpoint returning every position of a company as JSON.
AJAX_URL = 'http://www.lagou.com/gongsi/searchPosition.json'


class LagouCrawler(BaseCrawler):
    """Crawl all job postings of one company from lagou.com.

    Fetches the company's position list from the search-position AJAX
    endpoint, enriches each job with details scraped from its detail page,
    and upserts the results into MongoDB.
    """

    def __init__(self, cid, company_name):
        """
        :param cid: lagou company id, posted to the AJAX endpoint.
        :param company_name: human-readable name stored with each job.
        """
        super(LagouCrawler, self).__init__(cid=cid)
        self.company_name = company_name
        # Ask for everything in one oversized page to avoid paging logic.
        self.postdata = {
            'companyId': cid,
            'positionFirstType': u'全部',  # "all" position categories
            'pageNo': '1',
            'pageSize': '1000'
        }

    def find_more(self, html):
        """Scrape extra fields from a job-detail page.

        :param html: raw HTML of the job page; may be falsy on fetch failure.
        :return: dict with whichever of 'description', 'companyUrl',
                 'jobTitle', 'position', 'address' could be extracted.
        """
        result = {}
        if not html:
            return result
        soup = BeautifulSoup(html)

        job_desc = soup.find('dd', attrs={'class': 'job_bt'})
        result['description'] = job_desc.getText().strip() if job_desc else ""

        # The company feature list entry starting with u'主页' ("homepage")
        # carries the company website link.  Guard against the list being
        # absent on some page variants.
        features = soup.find('ul', attrs={'class': 'c_feature'})
        for li in (features.find_all('li') if features else []):
            if li.getText().strip().startswith(u'主页'):
                result['companyUrl'] = li.find('a')['href']
        try:
            h1 = soup.find('dt', attrs={'class': 'clearfix join_tc_icon'}).find('h1')
            result['jobTitle'] = h1.getText().replace('\n', '').strip()
            result['position'] = h1['title']
        except Exception:  # header block missing on this page variant
            pass
        try:
            result['address'] = soup.find('dl', attrs={'class': 'job_company'}).findChildren('div')[-2].getText()
        except Exception:  # address block missing -- best effort only
            pass
        return result

    def parse_item(self, job):
        """Map one job dict from the AJAX response onto BASE_DATA_FORMAT.

        :param job: one entry of the JSON position list.
        :return: a filled copy of BASE_DATA_FORMAT.
        """
        item = deepcopy(BASE_DATA_FORMAT)

        item['companyName'] = self.company_name
        item['sourceCity'] = job.get('city', '')
        # NOTE(review): 'entity' is filled from financeStage (funding stage),
        # though the template comment calls it company nature -- confirm.
        item['entity'] = job.get('financeStage', '')
        item['jobTitle'] = job.get('positionName', '')
        item['industry'] = job.get('industryField', '')
        item['scale'] = job.get('companySize', '')
        item['companyShortName'] = job.get('companyName', '')
        item['workingYears'] = job.get('workYear', '')
        item['salary'] = job.get('salary', '')
        item['degree'] = job.get('education', '')
        item['category'] = job.get('positionFirstType', '')
        # The canonical job URL doubles as the Mongo _id.
        item['_id'] = 'http://www.lagou.com/jobs/%s.html' % job.get('positionId', '')
        item['keywords'] = job.get('keyWords', '')
        item['companyId'] = job.get('companyId', '')
        item['jobNature'] = job.get('jobNature', '')
        item['releaseTime'] = job.get('bornTime', '')
        item['logo'] = INDEX_URL + job.get('companyLogo', '')
        # Split the advantage blurb on spaces, dropping empty fragments.
        item['welfare'] = [w for w in job.get('positionAdvantage', '').split(u' ') if w]
        item['sourceUrl'] = item['_id']
        try:
            # Enrich with fields scraped from the job page; best effort only,
            # a fetch/parse failure keeps the AJAX-derived fields.
            item = dict(item, **self.find_more(self.get(item['sourceUrl'])))
        except Exception:
            pass

        return item

    def save(self, data):
        """Upsert one job document, stamping its update time.

        :return: the pre-existing document, or None if this was an insert.
        """
        data['updateTime'] = now()
        return MONGO['crawler_company_all']['recruitment'].find_one_and_update(
            {'_id': data['_id']},
            {'$set': data},
            upsert=True
        )

    def run(self):
        """Fetch and persist every job of the company.

        :return: 204 when the company has no jobs, 200 when every job was
                 newly inserted, None when an already-stored job was hit.
        """
        jobs = json.loads(self.post(AJAX_URL, self.postdata))['content']['data']['page']['result']
        if not jobs:
            return 204
        for job in jobs:
            item = self.parse_item(job)
            rst = self.save(item)
            # save() returns the pre-existing document (None on a fresh
            # insert); a truthy value therefore means this job was already
            # stored, and crawling stops here.
            # NOTE(review): this early return yields None instead of a
            # status code and skips the remaining jobs -- confirm intended.
            if rst:
                return
            logger.info('saved data %s: %s' % (self.company_name, item['_id']))
        return 200


def main():
    """Ad-hoc entry point: crawl one hard-coded company (Alibaba China)."""
    crawler = LagouCrawler(347, u'阿里巴巴（中国）网络技术有限公司')
    crawler.run()


@consumer('10.117.29.191', 'lagou-company', logger=logger)
def task(param):
    """MQ consumer callback: crawl one company and record the outcome.

    :param param: message dict with 'cid' (company id) and 'cname' (name).
    :return: (status, '') tuple consumed by the MQ wrapper; status is the
             crawler's return value, or 500 on any exception.
    """
    try:
        rst = LagouCrawler(param['cid'], param['cname']).run()
    except Exception as e:
        logger.exception(e)
        rst = 500
    # Record the per-company crawl result, creating the log doc if absent.
    MONGO['crawler_log']['lagouCompany'].update_one(
        {'_id': param['cid']},
        {'$set': {'_id': param['cid'], 'result': rst}},
        upsert=True
    )
    return rst, ''

if __name__ == '__main__':
    # Presumably the @consumer decorator turns task() into a blocking
    # consume loop when called with no arguments -- TODO confirm; main()
    # above is only for one-off manual runs.
    task()
