#!/usr/bin/env python
# encoding=utf-8
# update

import re
import sys
sys.path.append('.')
from copy import deepcopy
import json
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.codehelper import trytry
from xtls.mqwrapper import consumer
from xtls.timeparser import now
from xtls.util import BeautifulSoup
import hashlib
import datetime
import requests
# Python 2 hack: re-expose sys.setdefaultencoding and force UTF-8 as the
# default codec so implicit str<->unicode conversions in this module do not
# raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

# Module-level logger named after this file.
logger = get_logger(__file__)
# COMPANY_URL = 'http://jobs.51job.com/all/co{cid}.html'
# Education-level tokens recognized in a job row's requirement text
# (enrolled student, technical secondary school, high school, junior
# college x2, bachelor, postgraduate, master, doctorate, post-doc).
DEGREE_KEYS = (u'在读学生', u'中专', u'高中', u'大专', u'专科', u'本科', u'研究生', u'硕士', u'博士', u'博士后')

# Template for one job-posting record; deep-copied per job in
# Job51Crawler.parse_jobs(). Fields without a comment are not populated
# by this crawler (kept for schema compatibility with other sources).
JOB_FORMAT = {
    'logo': '',               # company logo URL
    'companyShortName': '',
    'source': '51job',        # data-source tag
    '_id': '',                # job detail-page URL, used as the primary key
    'releaseTime': '',        # 'YYYY-MM-DD 00:00:00'
    'keywords': [],
    'companyId': '',
    'jobNature': '',
    'category': '',           # e.g. tech / operations / ...
    # 'type': '',               # e.g. backend / frontend / ...
    # "sourceType": "",         # e.g. Java / Python / ...
    "sourceCity": "",         # city the job is located in
    'degree': '',             # education requirement (one of DEGREE_KEYS)
    'workingYears': '',       # required years of experience
    'salary': '',             # salary range text
    'companyName': '',
    'industry': '',           # company industry
    'scale': '',              # company size (headcount range)
    'welfare': [],            # benefits / perks
    'entity': '',             # company ownership type
    'description': '',        # full job-description text
    'jobTitle': '',
    'sourceUrl': '',          # job detail-page URL
    'position': '',           # position / title
    'address': '',            # company address
    'phone': '',              # contact phone
    'companyUrl': '',         # company website
    "requireNum": '',         # number of openings
    "updateTime": '',
}

# Template for one company-profile record; deep-copied in
# Job51Crawler.__init__ and filled by find_company_info()/find_base_info().
COMPANY_FORMAT = {
    "_id": "",           # 51job company id (digits from 'co<id>' in the URL)
    "srcUrl": "",        # company job-listing page URL
    "logo": "",          # logo image URL
    "companyName": "",
    "entity": "",        # ownership type (1st field of the header 'a | b | c' line)
    "companySize": "",   # headcount range text (2nd field)
    "website": "",
    "industry": "",      # industry (3rd field)
    "desc": "",          # company description text
    "address": "",
}
# Matches company-scale strings such as u'50-150人' ("50-150 people").
PATTERN_SCALE = re.compile(ur'\d+\-\d+人')
# Search endpoint used to resolve a company name to its 51job company id.
CIDSEARCH_URL_51JOB = 'http://search.51job.com/jobsearch/search_result.php'
# Browser-like headers sent with the company-id search POST.
CIDSEARCHHEADER_51JOB = {
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding':'gzip, deflate',
        'Content-Type':'application/x-www-form-urlencoded',
        'Host':'search.51job.com',
        'Origin':'http://jobs.51job.com',
        'Referer':'http://jobs.51job.com/all/',
        'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
}
# URL template of a company's job-listing page, keyed by company id.
COMPANY_URL_51JOB = 'http://jobs.51job.com/all/co{cid}.html'
# BUCKET_NAME = 'recruitment'
# S3_AWS = S3()


class Job51Crawler(BaseCrawler):
    """Crawler for one company's job postings on 51job (jobs.51job.com).

    Resolves the company name to a 51job company id, scrapes the company
    profile, then walks every page of the company's job list.

    Fixes vs. the previous revision:
      * run() paginated with ``xrange(2, pages)`` which excluded the final
        page (e.g. 40 jobs -> 2 pages -> xrange(2, 2) is empty, so page 2
        was never fetched); the bound is now ``pages + 1``.
      * the completion log message said 'lagou' (a different job site).
      * bare ``except:`` narrowed and py2-only ``except E, e`` syntax
        replaced with ``except E as e``.
    """

    def __init__(self, companyName, need_detail=True):
        """
        :param companyName: unicode company name to look up on 51job.
        :param need_detail: when True, fetch each job's detail page for the
                            full description (one extra request per job).
        """
        super(Job51Crawler, self).__init__(companyName=companyName, data=deepcopy(COMPANY_FORMAT))
        self.companyName = companyName
        self.company_id = self.search_cid_by_company()
        self.data['_id'] = self.company_id
        self.data['srcUrl'] = COMPANY_URL_51JOB.format(cid=self.company_id)
        self.need_detail = need_detail

    def search_cid_by_company(self):
        """Resolve self.companyName to its 51job company id.

        POSTs the site's search form and extracts the id from the first
        result row's href. Returns '' when nothing matches.
        """
        companyName = self.companyName.encode('GBK')  # 51job's search form is GBK-encoded
        postData = {
            'stype': 1,
            'lang': 'c',
            'fromType': 102,
            'keywordtype': 1,
            'keyword': companyName,
            'jobarea': '',
        }
        response = self.post(CIDSEARCH_URL_51JOB, data=postData, headers=CIDSEARCHHEADER_51JOB)
        soup = BeautifulSoup(response, 'html5lib')
        try:
            # Row [0] of the result table is the header, so the first real
            # hit is row [1].
            hrefString = soup.find('div', attrs={'class': 'dw_table', 'id': 'resultList'}) \
                             .find_all('div', {'class': 'el'})[1] \
                             .find('span', {'class': 't2'}).find('a').attrs['href']
            companyCid = re.search(r'co(\d+)', hrefString).group(1)
        except Exception:  # missing result list / no match: treat as "not found"
            companyCid = ''
        return companyCid

    def find_base_info(self, soup):
        """Fill logo, companyName, entity, companySize and industry.

        :param soup: the company page header div ('tHeader tHCop').
        """
        with trytry():
            self.data['logo'] = soup.find('img', class_='cimg').get('src', '')
        try:
            self.data['companyName'] = soup.find('h1')['title']
        except Exception as e:
            logger.info(str(e))
        try:
            # Header line is 'entity | size | industry'; only trust it when
            # exactly three fields are present.
            info = [item.strip() for item in soup.find('p', class_='ltype').getText().split(u'|')]
            if len(info) == 3:
                self.data['entity'] = info[0]
                self.data['companySize'] = info[1]
                self.data['industry'] = info[2]
        except Exception as e:
            logger.info(str(e))

    def find_company_info(self, soup):
        """Populate the company profile and return the total job count.

        :param soup: parsed company job-listing page.
        :return: value of the hidden 'hidTotal' input (total number of
                 postings, as a string), or '' when absent.
        """
        self.find_base_info(soup.find('div', class_='tHeader tHCop'))
        with trytry():
            self.data['desc'] = soup.find('div', class_='tBorderTop_box bt').getText().strip()
        with trytry():
            self.data['address'] = soup.find('p', class_='fp').getText().replace(u'公司地址：', '').strip()
        with trytry():
            # Drop the 5-character leading label from the website line.
            self.data['website'] = soup.find('div', class_='bmsg tmsg inbox').getText().strip()[5:]
        with trytry():
            return soup.find('input', id='hidTotal')['value']
        return ''

    def save(self, job):
        """Persistence hook (formerly an S3 upload); intentionally a no-op."""
        pass

    def find_job(self, url):
        """Fetch a job's detail page and return extra fields for it.

        :param url: job detail-page URL.
        :return: {} when need_detail is False, else {'description': ...}.
        """
        data = {}
        if not self.need_detail:
            return data
        soup = BeautifulSoup(self.get(url))
        data['description'] = soup.find('div', class_='bmsg job_msg inbox').getText().strip()
        # Strip the trailing 5-character "report / share" widget text.
        if data['description'].endswith(u'举报\n分享'):
            data['description'] = data['description'][:-5].strip()
        return data

    def parse_jobs(self, soup):
        """Parse one page of the job list into JOB_FORMAT dicts.

        :param soup: parsed job-list container (rows are 'div.el').
        :return: list of job dicts, each enriched via find_job().
        """
        now_ = datetime.datetime.now()
        month = now_.month
        year = now_.year
        jobList = list()
        for item in soup.find_all('div', class_='el'):
            job = deepcopy(JOB_FORMAT)
            # t is 'MM-DD' with no year; a month greater than the current one
            # must belong to the previous year.
            t = item.find('span', class_='t5').getText().strip()
            post_year = year - 1 if int(t.split('-')[0]) > month else year
            job['releaseTime'] = '%s-%s 00:00:00' % (post_year, t)
            job['salary'] = item.find('span', class_='t4').getText().strip()
            job['sourceCity'] = item.find('span', class_='t3').getText().strip()
            # The t2 span mixes '|'-separated requirement fragments:
            # experience, headcount, degree.
            for temp in item.find('span', class_='t2').getText().split('|'):
                text = temp.strip()
                if u'年' in text:
                    job['workingYears'] = text
                elif text.startswith(u'招聘') and text.endswith(u'人'):
                    job['requireNum'] = text[2:-1]  # strip the 2-char prefix and 1-char suffix
                elif text in DEGREE_KEYS:
                    job['degree'] = text
                else:
                    logger.info('unhandled %s.' % text)
            a = item.find('p', class_='t1').find('a')
            job['_id'] = a['href']
            job['sourceUrl'] = a['href']
            job['jobTitle'] = a['title']

            # Copy company-level fields gathered by find_company_info().
            job['companyUrl'] = self.data['website']
            job['companyName'] = self.data['companyName']
            job['logo'] = self.data['logo']
            job['address'] = self.data['address']
            job['entity'] = self.data['entity']
            job['industry'] = self.data['industry']
            job['scale'] = self.data['companySize']

            job.update(self.find_job(job['sourceUrl']))
            jobList.append(job)
        return jobList

    def run(self):
        """Crawl the company page plus every job-list page.

        :return: list of job dicts; [] when the company id is unresolved,
                 the page carries the 'withdrawn' marker, or no total is found.
        """
        html = self.get(self.data['srcUrl'])
        soup = BeautifulSoup(html)
        jobList = list()
        if self.company_id == '':
            return jobList
        if soup.find('div', class_='qxjyxszw'):  # "listing withdrawn" placeholder
            return jobList

        total = self.find_company_info(soup)
        if not total:
            return jobList
        jobList += self.parse_jobs(soup.find('div', id='joblistdata'))

        # 20 jobs per page -> ceil(total / 20) pages. The previous bound
        # excluded the last page; '+ 1' makes the range inclusive of it.
        pages = (19 + int(total)) / 20
        for page in xrange(2, pages + 1):
            jobList += self.parse_jobs(BeautifulSoup(self.post(self.data['srcUrl'], {'pageno': page, 'hidTotal': total})))

        logger.info('crawler company %s 51job job completed' % self.companyName)
        return jobList


# @consumer('10.117.29.191', '51job-company-id', logger=logger)
# def task(param):
#     rst = 500
#     with trytry(logger=logger):
#         rst = Job51Crawler(param['companyId']).run()
#     MONGO['crawler_log']['job51company'].update_one(
#         {'_id': param['companyId']},
#         {'$set': {'_id': param['companyId'], 'result': rst}},
#         True
#     )
#     return rst, ''


def main():
    """
    JUST 4 TEST !!!
    """
    for cid in xrange(455, 1000000):
        print 'cid :', cid
        with trytry():
            print Job51Crawler(cid).run()


# ------------------------------------------------------------
# Standalone helpers for crawling a single company, used by the
# company monitor. Added 2016-08-17.
# ------------------------------------------------------------

def search_cid_by_company(companyName):
    """Resolve a company name to its 51job company id (module-level twin of
    Job51Crawler.search_cid_by_company, used by the company-monitor path).

    Fix: requests.post() was called with the misspelled keyword ``heads=``,
    which raised TypeError on every call; it must be ``headers=``.

    :param companyName: unicode company name.
    :return: numeric company id as a string, or '' when the request fails
             or no company matches.
    """
    companyName = companyName.encode('GBK')  # 51job's search form is GBK-encoded
    postData = {
        'stype': 1,
        'lang': 'c',
        'fromType': 102,
        'keywordtype': 1,
        'keyword': companyName,
        'jobarea': '',
    }
    response = requests.post(CIDSEARCH_URL_51JOB, data=postData, headers=CIDSEARCHHEADER_51JOB)
    if response.status_code != 200:
        return ''
    soup = BeautifulSoup(response.content, 'html5lib')
    try:
        # Row [0] of the result table is the header, so the first real hit
        # is row [1].
        hrefString = soup.find('div', attrs={'class': 'dw_table', 'id': 'resultList'}) \
                         .find_all('div', {'class': 'el'})[1] \
                         .find('span', {'class': 't2'}).find('a').attrs['href']
        companyCid = re.search(r'co(\d+)', hrefString).group(1)
    except Exception:  # missing result list / no match: treat as "not found"
        companyCid = ''
    return companyCid



# def search_recruitment_info_by_cid(cid):
#     srcUrl = COMPANY_URL_51JOB.format(cid)
#     response = requests.get(srcUrl)
#     if response.status_code == 200:
#         soup = BeautifulSoup(response.content,'html5lib')
#     if soup.find('div', class_='qxjyxszw'):
#             return 404


if __name__ == '__main__':
    companyList = [ u'杭州誉存科技有限公司', u'重庆澜鼎信息技术有限公司', u'上汽大众汽车有限公司', u'重庆金易房地产开发（集团）有限公司', u'无锡市中卫大药房有限公司',\
                   u'江门市江磁电工企业有限公司'u'重庆万光实业集团有限公司', u'重庆钢运置业代理有限公司', u'重庆市金牛线缆有限公司', u'安投融（北京）网络科技有限公司']
    for company in companyList:
        print Job51Crawler(company).run()