#!/usr/bin/env python
# encoding=utf-8

import json
import re
import sys
from copy import deepcopy

from pymongo import MongoClient
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.timeparser import now
from xtls.util import BeautifulSoup
from xtls.codehelper import trytry


# Python 2 only: force the process-wide default encoding to UTF-8 so the
# implicit str/unicode coercions on Chinese page text don't raise
# UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

__author__ = 'xlzd'
logger = get_logger(__file__)
# Local MongoDB instance used by LiepinCrawler.save().
MONGO = MongoClient('127.0.0.1', 27017)
# Company profile page, e.g. http://company.liepin.com/8068348/
COMPANY_URL = 'http://company.liepin.com/{c_id}/'

# Template for one job-posting document; deepcopy'd per job in parse_jobs().
JOB_FORMAT = {
    'logo': '',               # -
    'companyShortName': '',   # -
    'source': 'liepin',       # -
    '_id': '',                #
    'releaseTime': '',        #
    'keywords': '',           # -
    'companyId': '',          # -
    'jobNature': '',          # -
    'category': '',           # - e.g. tech / operations / ...
    # 'type': '',               # backend / frontend / ...
    # "sourceType": "",         # Java / Python / ...
    "sourceCity": "",         # -
    'degree': '',             # -  required education level
    'workingYears': '',       # -  required years of experience
    'salary': '',             # -  salary
    'companyName': '',        # -
    'industry': '',           # -  industry
    'scale': '',              # -  company size
    'welfare': [],            # - welfare / benefits
    'entity': '',             # -  company ownership type
    'description': '',        # -  job description
    'jobTitle': '',           # -
    'sourceUrl': '',          # -
    'position': '',           # -  position title
    'address': '',            # - company address
    'phone': '',              # - contact phone
    'companyUrl': '',         # -  company website
    "requireNum": '',
    "updateTime": '',
}

# Template for the company document; deepcopy'd once per crawler instance.
COMPANY_FORMAT = {
    "_id": "",
    "srcUrl": "",
    "logo": "",
    "companyName": "",
    "companyShortName": "",
    "entity": "",
    "companySize": "",
    "website": "",
    "industry": "",
    "labels": "",
    "desc": "",
    "address": "",
    "fundingStatus": "",
}
# Py2 unicode-raw patterns: company scale like u'50-100人', and bare digits
# (used to pull the job count out of the job-title heading).
PATTERN_SCALE = re.compile(ur'\d+\-\d+人')
PATTERN_NUMBER = re.compile(ur'\d+')


class LiepinCrawler(BaseCrawler):
    """Crawl a company profile and its job postings from company.liepin.com.

    The company document (COMPANY_FORMAT shape) is persisted to MongoDB via
    save(); job listings are paged through liepin's sojob POST endpoint.
    """

    PAGE_SIZE = 15  # jobs per listing page on liepin.com

    def __init__(self, company_id, need_detail=True):
        """
        :param company_id: numeric liepin company id; doubles as the Mongo _id
        :param need_detail: fetch each job's detail page for its description
        """
        super(LiepinCrawler, self).__init__(company_id=company_id, data=deepcopy(COMPANY_FORMAT))
        self.data['_id'] = company_id
        self.data['srcUrl'] = COMPANY_URL.format(c_id=company_id)
        self.need_detail = need_detail

    def parse_base(self, soup):
        """Extract logo, company names and welfare labels from the page header."""
        logo = soup.find('img')
        if logo:
            self.data['logo'] = logo.get('src', '')
        self.data['companyShortName'] = soup.find('h1').getText().strip()
        try:
            self.data['companyName'] = soup.find('p').getText().strip()
        except AttributeError:
            # was a bare `except:`; the only expected failure is find('p')
            # returning None -- fall back to the short name
            self.data['companyName'] = self.data['companyShortName']
        with trytry():
            self.data['labels'] = [li.getText().strip() for li in soup.find('ul', class_='welfare clearfix').find_all('li')]

    def parse_side_base(self, soup):
        """Map the labelled <li> rows of the basic-info sidebar onto self.data."""
        for li in soup.find_all('li'):
            text = li.getText().strip()
            # each row is u'<2-char label>：<value>'; drop the 3-char prefix
            if text.startswith(u'行业：'):
                self.data['industry'] = text[3:].strip()
            elif text.startswith(u'规模：'):
                self.data['companySize'] = text[3:].strip()
            elif text.startswith(u'地址：'):
                self.data['address'] = text[3:].strip()
            elif text.startswith(u'融资：'):
                self.data['fundingStatus'] = text[3:].strip()
            else:
                logger.info('unhandled %s' % text)

    def parse_other(self, soup):
        """Dispatch each sidebar <div> to a parser based on its <h2> title."""
        if not soup:
            return
        for s in soup:
            title = s.find('h2').getText()
            if title == u'基本信息':
                self.parse_side_base(s)
            else:
                # was a bare `print`; use the module logger like parse_side_base
                logger.info('unhandled section %s' % title)

    def parse_company_info(self):
        """Fetch and parse the company profile page and persist it.

        :returns: total number of job postings, or 0 when it cannot be parsed
        """
        html = self.get(self.data['srcUrl'])
        soup = BeautifulSoup(html)
        main_soup = soup.find('div', class_='main wrap')
        self.parse_base(main_soup.find('section', class_='clearfix'))
        with trytry():
            self.data['desc'] = main_soup.find('p', attrs={'class': 'profile', 'data-selector': 'detail'}).getText().strip()
        self.parse_other(main_soup.find_all('div', class_='base-info clearfix'))
        self.save(self.data)
        with trytry():
            # the job count is the first number in the job-title heading;
            # trytry() presumably swallows parse failures (TODO confirm), in
            # which case we fall through to the 0 below
            return int(PATTERN_NUMBER.findall(main_soup.find('h2', class_='job-title').getText())[0])
        return 0

    def find_job_detail(self, url):
        """Fetch a job detail page; return {'description': ...} when found."""
        result = {}
        html = self.get(url)
        soup = BeautifulSoup(html)
        # NB: the trailing space in the class name matches the site's markup
        for item in soup.find_all('div', class_='job-main main-message '):
            if item.find('h3').getText().strip().startswith(u'职位描述'):
                result['description'] = item.getText().replace(u'职位描述：', '').strip()
        return result

    def parse_jobs(self, page):
        """Fetch one page of the job-listing API and build job documents.

        :param page: 0-based page index, sent as curPage
        :returns: True (run() treats a falsy return as "stop paging")
        """
        data = self.post('http://company.liepin.com/company/sojob/', data={
            'ecompId': self.data['_id'],
            'pageSize': self.PAGE_SIZE,
            'curPage': page,
            'keywords': '',
            'dq': '',
            'deptId': ''
        })
        json_obj = json.loads(data)
        for job in json_obj.get('data', {}).get('list', []):
            item = deepcopy(JOB_FORMAT)
            # per-job fields from the listing API
            item['sourceCity'] = job.get('city', '')
            item['degree'] = job.get('eduLevel', '')
            item['workingYears'] = job.get('workYear', '')
            item['salary'] = job.get('salary', '')
            item['releaseTime'] = job.get('time', '')
            item['position'] = job.get('title', '')
            item['jobTitle'] = job.get('title', '')
            item['sourceUrl'] = job.get('url', '')
            item['_id'] = job.get('url', '')
            # company-level fields copied from the already-parsed profile
            item['companyName'] = self.data['companyName']
            item['companyShortName'] = self.data['companyShortName']
            item['scale'] = self.data['companySize']
            item['companyId'] = self.data['_id']
            item['entity'] = self.data['entity']
            item['welfare'] = self.data['labels']
            item['industry'] = self.data['industry']
            item['address'] = self.data['address']
            item['logo'] = self.data['logo']
            item['companyUrl'] = self.data['website']

            # honour the need_detail flag (it was stored in __init__ but the
            # detail page used to be fetched unconditionally)
            if self.need_detail:
                item.update(self.find_job_detail(item['sourceUrl']))
            # NOTE(review): `item` is fully built but never persisted --
            # presumably a self.save(item, <job collection>) was intended;
            # confirm the target collection before enabling.

        return True

    def save(self, data, coll='liepinCompany'):
        """Upsert `data` into crawler_company_all.<coll>, stamping updateTime."""
        data['updateTime'] = now()
        return MONGO['crawler_company_all'][coll].find_one_and_update(
            {'_id': data['_id']},
            {'$set': data},
            upsert=True
        )

    def run(self):
        """Crawl the company profile and every page of its job listing.

        :returns: 200 on success, None when no jobs were found
        """
        total = self.parse_company_info()
        if not total:
            return
        # Ceil-divide so a final partial page -- and totals <= PAGE_SIZE,
        # which the original `(total - 1) / 15` skipped entirely -- is
        # crawled. The original also returned 200 after the FIRST page
        # because parse_jobs always returns True; loop over all pages instead.
        pages = (total + self.PAGE_SIZE - 1) // self.PAGE_SIZE
        for page in xrange(pages):
            if not self.parse_jobs(page):
                break
        return 200


def main():
    print '>>', LiepinCrawler(8068348).run()


if __name__ == '__main__':
    main()
