#!/usr/bin/env python
# encoding=utf-8

import json
import sys
from copy import deepcopy
import datetime
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
import requests
from xtls.mqwrapper import consumer
from xtls.timeparser import now
from xtls.util import BeautifulSoup
from scpy.xawesome_codechecker import get_ip


# Python 2-only hack: reload(sys) restores setdefaultencoding(), then force
# UTF-8 so implicit str/unicode mixing of Chinese text does not raise.
reload(sys)
sys.setdefaultencoding('utf-8')

# Module-level logger keyed by this file's name.
logger = get_logger(__file__)


# Template record for one job posting; parse_item() deepcopies this and
# fills it from lagou's AJAX API plus the posting's detail HTML page.
BASE_DATA_FORMAT = {
    'logo': '',               # company logo URL
    'companyShortName': '',   # short display name
    'source': 'lagou',        # data-source marker
    '_id': '',                # posting URL, doubles as the unique id
    'releaseTime': '',        # release date/time of the posting
    'keywords': '',           # filled from companyLabelList (a list)
    'companyId': '',          # lagou company id
    'jobNature': '',          # full-time / part-time / ...
    'category': '',           # job category: Tech / Operations / ...
    # 'type': '',               # backend / frontend / ...
    # "sourceType": "",         # Java / Python / ...
    "sourceCity": "",         # city of the position
    'degree': '',             # education requirement
    'workingYears': '',       # required years of experience
    'salary': '',             # salary range
    'companyName': '',        # full registered company name
    'industry': '',           # industry sector
    'scale': '',              # company size
    'welfare': [],            # benefits / perks
    'entity': '',             # company nature (filled from financeStage)
    'description': '',        # job description text
    'jobTitle': '',           # job title
    'sourceUrl': '',          # posting URL (same as _id)
    'position': '',           # position name
    'address': '',            # company address
    'phone': '',              # contact phone
    'companyUrl': '',         # company website
    "requireNum": '',         # presumably number of openings — never filled here
    "updateTime": '',         # presumably last update time — never filled here
}
# Search page used to discover a company's lagou id by name.
CIDSEARCH_URL = 'http://www.lagou.com/jobs/list_{}'
# Site root; prefixed onto relative logo paths.
INDEX_URL = 'http://www.lagou.com/'
# AJAX endpoint listing a company's open positions.
AJAX_URL = 'https://www.lagou.com/gongsi/searchPosition.json'


class LagouCrawler(BaseCrawler):
    """Crawl job postings for one company from lagou.com.

    Flow: resolve the numeric lagou company id from the company name,
    then page through the company's AJAX job-listing endpoint, enriching
    each job with details scraped from its posting page.
    """

    def __init__(self, company_name):
        super(LagouCrawler, self).__init__(company_name=company_name)
        self.companyName = company_name
        # Numeric lagou company id, or '' when the company cannot be found.
        self.cid = self.search_cid_by_companyName()
        # Form payload for the searchPosition.json AJAX endpoint.
        self.postdata = {
            'companyId': self.cid,
            'positionFirstType': u'全部',  # "all" job categories
            'pageNo': 1,
            'pageSize': 10,
        }
        self.header = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Referer': 'https://www.lagou.com/gongsi/j347.html',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded',
            'X-Anit-Forge-Code': '77769973',
            'X-Anit-Forge-Token': '3fa66dea-0ae6-40ee-b0c9-3ca66703ef0a',
            'X-Requested-With': 'XMLHttpRequest',
            # BUG FIX: was 'https//www.lagou.com' — missing ':' in the scheme.
            'Origin': 'https://www.lagou.com',
            'Host': 'www.lagou.com',
            'Pragma': 'no-cache',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            # NOTE(review): hard-coded session cookie will expire; a fresh
            # session should be obtained instead of relying on this value.
            'Cookie': 'LGUID=20160301115450-592bb5b1-df61-11e5-90c1-5254005c3644; user_trace_token=20160301115450-205562f28761449e8b708a0dd22724a3; tencentSig=6508612608; LGMOID=20161024183613-9A92F23A9727D51B4C174D54CD036543; index_location_city=%E5%85%A8%E5%9B%BD; HISTORY_POSITION=2501100%2C15k-25k%2CMegvii%2C%E6%B5%8B%E8%AF%95%E5%BC%80%E5%8F%91%E5%B7%A5%E7%A8%8B%E5%B8%88%7C; JSESSIONID=6BC169B7F23461497CAB8109B2E94BAE; _gat=1; PRE_UTM=; PRE_HOST=; PRE_SITE=; PRE_LAND=http%3A%2F%2Fwww.lagou.com%2F; SEARCH_ID=144a6937dbce45cea6bc966c04327347; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1477305373; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1477893496; _ga=GA1.2.1519605344.1456804491; LGSID=20161031135644-cd883af1-9f2e-11e6-b1ba-5254005c3644; LGRID=20161031135817-04d06d85-9f2f-11e6-be36-525400f775ce'
        }

    def search_cid_by_companyName(self):
        """Return lagou's numeric company id for self.companyName, or ''.

        Scrapes the job-search result page and reads the 'data-lg-tj-cid'
        attribute of the first listed position's link.
        """
        url = CIDSEARCH_URL.format(self.companyName)
        response = self.get(url)
        soup = BeautifulSoup(response, 'html5lib')
        try:
            item_list = soup.find('ul', attrs={'class': 'item_con_list'})
            link = item_list.find('li', {'class': 'c_btn'}).find('a')
            return int(link.attrs['data-lg-tj-cid'])
        except (AttributeError, KeyError, TypeError, ValueError):
            # Company not listed on lagou, or the page layout changed.
            return ''

    def find_more(self, html):
        """Scrape extra fields from a job-posting detail page.

        Returns a dict that may contain: description, companyUrl,
        jobTitle, position, address. Missing pieces are simply omitted.
        """
        result = {}
        if not html:
            return result
        soup = BeautifulSoup(html)

        job_desc = soup.find('dd', attrs={'class': 'job_bt'})
        result['description'] = job_desc.getText().strip() if job_desc else ""

        # The company homepage lives in the "features" list, in the row
        # whose text starts with 主页 ("homepage").
        feature = soup.find('ul', attrs={'class': 'c_feature'})
        if feature is not None:  # robustness: original crashed when absent
            for li in feature.find_all('li'):
                if li.getText().strip().startswith(u'主页'):
                    a = li.find('a')
                    if a is not None and a.has_attr('href'):
                        result['companyUrl'] = a['href']
        try:
            h1 = soup.find('dt', attrs={'class': 'clearfix join_tc_icon'}).find('h1')
            result['jobTitle'] = h1.getText().replace('\n', '').strip()
            result['position'] = h1['title']
        except (AttributeError, KeyError):
            pass
        try:
            result['address'] = soup.find('dl', attrs={'class': 'job_company'}).findChildren('div')[-2].getText()
        except (AttributeError, IndexError):
            pass
        return result

    def parse_item(self, job):
        """Map one raw job dict from the AJAX API onto BASE_DATA_FORMAT.

        Best-effort: fields from the detail page (find_more) are merged in
        when the page can be fetched and parsed; API fields survive either way.
        """
        item = deepcopy(BASE_DATA_FORMAT)

        # NOTE(review): self.company_name is presumably set by
        # BaseCrawler.__init__ (distinct from camelCase self.companyName
        # set locally) — verify against the base class.
        item['companyName'] = self.company_name
        item['sourceCity'] = job.get('city', '')
        item['entity'] = job.get('financeStage', '')
        item['jobTitle'] = job.get('positionName', '')
        item['industry'] = job.get('industryField', '')
        item['scale'] = job.get('companySize', '')
        item['companyShortName'] = job.get('companyName', '')
        item['workingYears'] = job.get('workYear', '')
        item['salary'] = job.get('salary', '')
        item['degree'] = job.get('education', '')
        item['category'] = job.get('positionFirstType', '')
        item['_id'] = 'http://www.lagou.com/jobs/%s.html' % job.get('positionId', '')
        item['keywords'] = job.get('companyLabelList', [])
        item['companyId'] = job.get('companyId', '')
        item['jobNature'] = job.get('jobNature', '')
        # BUG FIX: default to '' — createTime may be absent or None, and
        # None.find(':') raised AttributeError, crashing the whole run().
        item['releaseTime'] = job.get('createTime', '') or ''
        if item['releaseTime'].find(':') != -1:
            # "HH:MM"-style relative timestamps mean "today" — normalize.
            item['releaseTime'] = datetime.datetime.now().strftime('%Y-%m-%d')
        item['logo'] = INDEX_URL + job.get('companyLogo', '')
        # Split the benefits blurb on spaces, dropping empty pieces.
        item['welfare'] = [w for w in job.get('positionAdvantage', '').split(u' ') if w]
        item['sourceUrl'] = item['_id']
        try:
            # Enrich from the posting's HTML detail page (best effort).
            item = dict(item, **self.find_more(self.get(item['sourceUrl'])))
        except Exception as e:
            # Keep the API-derived fields, but log instead of silently passing.
            logger.warn('find_more failed for %s: %s' % (item['_id'], e))

        return item

    def save(self, data):
        """Persistence hook; intentionally a no-op (run() collects results)."""
        pass

    def run(self):
        """Crawl all job pages for the company; return the parsed items."""
        result = []
        if self.cid == '':
            logger.info('company %s do not search in lagou' % self.companyName)
            return result
        page_size = self.postdata['pageSize']  # single source for the page size
        for page in range(1, 100):  # hard safety cap of 99 pages
            self.postdata['pageNo'] = page
            jobs = json.loads(self.post(AJAX_URL, self.postdata))['content']['data']['page']['result']
            if not jobs:
                logger.info('no jobs info for company %s' % self.companyName)
                return result

            for job in jobs:
                item = self.parse_item(job)
                self.save(item)
                result.append(item)
                logger.info('saved data %s: %s' % (self.company_name, item['_id']))
            # A short page means this was the last one.
            if len(jobs) < page_size:
                break
        return result


def main():
    """Crawl lagou recruitment info for a single hard-coded company."""
    crawler = LagouCrawler(u'阿里巴巴（中国）网络技术有限公司')
    crawler.run()


# Script entry point.
if __name__ == '__main__':
    main()
