#!/usr/bin/env python
# encoding=utf-8

import json
import sys
from copy import deepcopy
import datetime
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
import requests
from xtls.mqwrapper import consumer
from xtls.timeparser import now
from xtls.util import BeautifulSoup
from util.trans_recruitment_format import *
import hashlib
import datetime
import re


# Python 2 hack: re-import sys to restore setdefaultencoding (site.py deletes
# it at startup) so implicit str<->unicode conversions use UTF-8 instead of
# ASCII. The byte/unicode mixing in parse_time() below relies on this.
reload(sys)
sys.setdefaultencoding('utf-8')

# Module-level logger keyed to this file's path.
logger = get_logger(__file__)


# Template for one job-posting record; parse_html() fills a deepcopy of this
# per posting. '-' markers were in the original author's field checklist.
BASE_DATA_FORMAT = {
    'logo': '',               # -
    'companyShortName': '',   # -
    'source': 'liepin',        # - data-source tag
    '_id': '',                # detail-page URL, reused as the record id
    'releaseTime': '',        # normalized 'YYYY-MM-DD' release date
    'keywords': [],           # -
    'companyId': '',          # -
    'jobNature': '',          # -
    'category': '',           # - e.g. tech / operations / ...
    # default
    # 'type': '',               # backend / frontend / ...
    # "sourceType": "",         # Java / Python / ...
    "sourceCity": "",         # -
    'degree': '',             # -  required education level
    'workingYears': '',       # -  required years of experience
    'salary': '',             # -  salary
    'companyName': '',        # -
    'industry': '',           # -  industry
    'scale': '',              # -  company size
    'welfare': [],            # - benefits / perks
    'entity': '',             # -  company ownership type
    'description': '',        # -  job description
    'jobTitle': '',           # -
    'sourceUrl': '',          # -
    'position': '',           # -  position
    'address': '',            # - company address
    'phone': '',              # - contact phone
    'companyUrl': '',         # -  company website
    "requireNum": '',
    "updateTime": '',
    # addition (derived fields, not populated by this crawler)
    # "categoryType":'',        # - job category type
    # "city":'',                # - normalized city name
}
# NOTE(review): the three lagou.com endpoints below look like leftovers from a
# sibling LagouCrawler and appear unused in this file -- confirm before removal.
CIDSEARCH_URL = 'http://www.lagou.com/jobs/list_{}'
INDEX_URL = 'http://www.lagou.com/'
AJAX_URL = 'https://www.lagou.com/gongsi/searchPosition.json'
# liepin keyword-search URL: {0} = company name, {1} = zero-based page index.
LIEPIN_URL = 'https://www.liepin.com/zhaopin/?key={}&curPage={}'


class LiePinCrawler(BaseCrawler):
    """Crawler for the job postings of a single company on liepin.com.

    ``run()`` pages through the liepin keyword-search results for the
    company name and returns one dict per posting, shaped like
    BASE_DATA_FORMAT.
    """

    def __init__(self, company_name):
        # BaseCrawler presumably stores company_name as self.company_name
        # (run() reads that attribute) -- TODO confirm in BaseCrawler.
        # Keep the camelCase alias used by the log lines and parse_html().
        super(LiePinCrawler, self).__init__(company_name=company_name)
        self.companyName = company_name

    def run(self):
        """Crawl every search-result page until an empty page is returned.

        Returns:
            list of dict: one parsed job-info dict per posting found.
        """
        result = []
        pageCount = 0  # liepin's curPage query parameter is zero-based
        while True:
            url = LIEPIN_URL.format(self.company_name, pageCount)
            response = self.get(url)
            soup = BeautifulSoup(response)
            logger.info('searching No.%d page of company %s from liepin'%((pageCount+1), self.companyName))
            # Posting links on the result page carry a fixed promotion id.
            jobInfoList = soup.find('ul', attrs={'class': 'sojob-list'}).find_all('a', attrs={'data-promid': "imscid=R000000075"})
            if jobInfoList:
                for item in jobInfoList:
                    url_job_item = item.attrs.get('href')
                    jobInfo = self.parse_html(url_job_item)
                    result.append(jobInfo)
                pageCount += 1
            else:
                # An empty result list marks the end of pagination.
                logger.info('searching company %s from liepin finished'%self.companyName)
                break
        return result

    def parse_html(self, url):
        """Fetch one job-detail page and map it onto BASE_DATA_FORMAT.

        Args:
            url: absolute URL of a liepin job-detail page.

        Returns:
            dict: a fresh copy of BASE_DATA_FORMAT with the parsed fields
            filled in. An AttributeError/IndexError propagates if the page
            layout no longer matches the selectors below.
        """
        html = self.get(url)
        soup = BeautifulSoup(html)
        jobInfo = deepcopy(BASE_DATA_FORMAT)

        # base part
        jobInfo['_id'] = url
        jobInfo['companyName'] = self.companyName
        jobInfo['sourceUrl'] = url
        jobInfo['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d')

        # main title
        jobInfo['jobTitle'] = soup.find('div', attrs={'class': 'title-info'}).find('h1').text

        # title part: salary is the text before the literal whitespace run
        # emitted by liepin's page template.
        jobInfo['salary'] = soup.find('div', attrs={'class': 'job-title-left'}).find('p', attrs={'class': 'job-item-title'})\
                            .text.split('\r\n\t\t\t\t\t\t\t\t\t\n')[0]
        # Fixed <span> order on the detail page: [_, city, time, degree,
        # years] -- TODO confirm against a live page if parsing breaks.
        titleList = soup.find('div', attrs={'class': 'job-title-left'}).find_all('span')
        jobInfo['sourceCity'] = titleList[1].text
        jobInfo['releaseTime'] = self.parse_time(titleList[2].text)
        jobInfo['degree'] = titleList[3].text
        jobInfo['workingYears'] = titleList[4].text

        # tag part
        tag = soup.find('div', attrs={'class': 'tag-list'})
        jobInfo['keywords'] = tag.text.strip().split('\n')

        # description part
        des = soup.find_all('div', attrs={'class': 'main-message'})[0].text
        jobInfo['description'] = des
        return jobInfo

    def parse_time(self, text):
        """Normalize a liepin release-time label to a 'YYYY-MM-DD' string.

        Accepts either an absolute date ('2016-08-01') or a relative phrase
        containing digits plus u'\u6708' (months) or u'\u5e74' (years); relative
        phrases become an approximate past date (30 / 365 days per unit).

        BUG FIX: the original returned a raw datetime object on the
        absolute-date path but a formatted string on every other path; the
        return type is now consistently a 'YYYY-MM-DD' string.
        """
        try:
            text = text.encode('utf-8')
            return datetime.datetime.strptime(text, '%Y-%m-%d').strftime('%Y-%m-%d')
        except Exception:
            # Not an absolute date; fall through to relative parsing.
            pass
        numbers = re.findall(r'\d+', text)
        now = datetime.datetime.now()
        if not numbers:
            return now.strftime('%Y-%m-%d')
        # Average all digit groups (handles range-style labels like '1-2').
        mean = sum(float(item) for item in numbers) / len(numbers)
        if text.find(u'月') != -1:
            return (now - datetime.timedelta(days=30 * mean)).strftime('%Y-%m-%d')
        if text.find(u'年') != -1:
            return (now - datetime.timedelta(days=365 * mean)).strftime('%Y-%m-%d')
        return now.strftime('%Y-%m-%d')


def main():
    """Demo entry point: crawl one company's liepin postings and print them."""
    # Crawl recruitment info for a single company by its registered name.
    result = LiePinCrawler(u'重庆猪八戒网络有限公司').run()
    print(result)

if __name__ == '__main__':
    main()
