# -*- coding: utf-8 -*-
from scpy.logger import get_logger
import os
import sys

from copy import deepcopy
from xtls.basecrawler import BaseCrawler
import json
from xtls.util import BeautifulSoup
import datetime

# Python 2 compatibility hack: force the process-wide default encoding to
# UTF-8 so implicit str<->unicode comparisons/coercions of the Chinese
# literals in this module do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

logger = get_logger(__file__)

# Directory containing this module; normalized to end with "/" (when
# non-empty) so it can be used directly as a path prefix.
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
    CURRENT_PATH = CURRENT_PATH + "/"

# Template for a single job-posting record; deep-copied once per parsed job.
# (Field annotations translated from the original Chinese comments.)
JOB_FORMAT = {
    'logo': '',  # -
    'companyShortName': '',  # -
    'source': 'zhilian',  # - data-source tag
    '_id': '',  #
    'releaseTime': '',  #
    'keywords': [],  # -
    'companyId': '',  # -
    'jobNature': '',  # -
    'category': '',  # - e.g. tech / operations / ...
    # 'type': '',               # backend / frontend / ...
    # "sourceType": "",         # Java / Python / ...
    "sourceCity": "",  # -
    'degree': '',  # -  education requirement
    'workingYears': '',  # -  required years of experience
    'salary': '',  # -  salary
    'companyName': '',  # -
    'industry': '',  # -  industry
    'scale': '',  # -  company size
    'welfare': [],  # - benefits / perks
    'entity': '',  # -  company ownership type
    'description': '',  # -  job description
    'jobTitle': '',  # -
    'sourceUrl': '',  # -
    'position': '',  # -  position
    'address': '',  # - company address
    'phone': '',  # - contact phone
    'companyUrl': '',  # -  company website
    "requireNum": '',
    "updateTime": '',
}
# Template for the company record handed to BaseCrawler as its `data`.
COMPANY_FORMAT = {
    "_id": "",
    "srcUrl": "",
    "logo": "",
    "companyName": "",
    "entity": "",
    "companySize": "",
    "website": "",
    "industry": "",
    "desc": "",
    "address": "",
}

# Cities and provinces that every company search is iterated over.
LOCATION_LIST = [u"北京", u"上海", u"广州", u"深圳", u"天津", u"武汉", u"西安", u"成都", u"大连", u"长春", u"沈阳", u"南京", u"济南", u"青岛", u"杭州", u"苏州", u"无锡",
                 u"宁波", u"重庆", u"郑州", u"长沙", u"福州", u"厦门", u"哈尔滨", u"石家庄", u"合肥", u"惠州", u"广东", u"湖北", u"陕西", u"四川", u"辽宁", u"吉林", u"江苏",
                 u"山东", u"浙江", u"广西", u"安徽",
                 u"河北", u"山西", u"内蒙", u"黑龙江", u"福建", u"江西", u"河南", u"湖南", u"海南", u"贵州", u"云南", u"西藏", u"甘肃", u"青海", u"宁夏", u"新疆", u"香港",
                 u"澳门", u"台湾省"]
# Search-result URL template: jl=location, kw=keyword, p=page number,
# kt=search type (the crawler always passes 2 — presumably "search by
# company name"; TODO confirm against the site's query API).
ZHILIAN_SEARCH_URL = 'http://sou.zhaopin.com/jobs/searchresult.ashx?jl={}&kw={}&p={}&kt={}'
# Exclusive page bound: pages 1..MAX_PAGE-1 are fetched per location.
# NOTE(review): range(1, MAX_PAGE) never requests page 10 — confirm intended.
MAX_PAGE = 10


class ZhilianCrawler(BaseCrawler):
    """Crawl job postings for one company from zhaopin.com (Zhilian).

    ``run()`` searches every location in LOCATION_LIST, gathers the job
    detail-page URLs, then parses each page into a dict shaped like
    JOB_FORMAT.  Campus-recruitment ("xiaoyuan") postings use a different
    page layout and are parsed by :meth:`find_job_info_xiaoyuan`.
    """

    def __init__(self, companyName, need_detail=True):
        """
        :param companyName: company name used as the search keyword.
        :param need_detail: stored on the instance for callers.  It was
            previously accepted but silently dropped.
        """
        super(ZhilianCrawler, self).__init__(companyName=companyName, data=deepcopy(COMPANY_FORMAT))
        self.company_name = companyName
        self.need_detail = need_detail

    def find_company_jobs_url(self):
        """Return the list of job detail-page URLs found for the company.

        For each location, result pages 1 .. MAX_PAGE-1 are fetched until a
        page without job rows is hit, then the next location is tried.
        """
        jobs_url = []
        for location in LOCATION_LIST:
            for page in range(1, MAX_PAGE):
                url = ZHILIAN_SEARCH_URL.format(location, self.company_name, page, 2)
                soup = BeautifulSoup(self.get(url))
                # Query the result container once (the original did it twice).
                content_divs = soup.find_all('div', attrs={'class': 'newlist_list_content'})
                if not content_divs:
                    logger.info('[%s] exit at location %s of page %d', self.company_name, location, page)
                    break
                # The first <table> is the header row; actual postings follow.
                job_tables = content_divs[0].find_all('table')
                if len(job_tables) > 1:
                    for job_table in job_tables[1:]:
                        jobs_url.append(job_table.find('a').get('href'))
                else:
                    logger.info('[%s] exit at location %s of page %d', self.company_name, location, page)
                    break
        return jobs_url

    def find_job_info(self, url):
        """Parse a regular (non-campus) job detail page into a JOB_FORMAT dict.

        Extraction is best-effort: a failure on one field is logged and the
        field keeps its JOB_FORMAT default (or a documented fallback) instead
        of aborting the whole page.
        """
        job_info = deepcopy(JOB_FORMAT)
        soup = BeautifulSoup(self.get(url))

        # The detail-page URL doubles as the record id.
        job_info['_id'] = url
        job_info['sourceUrl'] = url

        # Job title.
        try:
            job_info['jobTitle'] = soup.find('div', attrs={'class': 'fl'}).find('h1').text
        except Exception as e:
            logger.error('get job title failed for %s', e)
        # Company name; fall back to the name we searched for.
        try:
            job_info['companyName'] = soup.find('div', attrs={'class': 'fl'}).find('a').text
        except Exception as e:
            job_info['companyName'] = self.company_name
            logger.error('get company name failed for %s,using searching companyName', e)
        # Welfare / benefit tags.
        try:
            welfare_box = soup.find('div', attrs={'class': 'welfare-tab-box'})
            job_info['welfare'] = [item.text for item in welfare_box.find_all('span')]
        except Exception as e:
            logger.error('get company welfare failed for %s', e)

        # Basic facts live in a fixed-order <ul>; fetch its <li> items once.
        detail_soup = soup.find('ul', attrs={'class': 'terminal-ul clearfix'})
        try:
            detail_items = detail_soup.find_all('li')
        except Exception as e:
            # No detail list at all: per-field loop below logs each miss and
            # applies the fallbacks, matching the original behaviour.
            detail_items = []
            logger.error('get job detail list failed for %s', e)

        # (index into detail_items, job_info key, fallback on failure or None
        #  to keep the JOB_FORMAT default, label used in the error log).
        # Index 3 is skipped — it was never extracted by this crawler.
        field_specs = [
            (0, 'salary', None, 'company salary'),
            (1, 'sourceCity', None, 'job location'),
            (2, 'releaseTime', datetime.datetime.now().strftime('%Y-%m-%d'), 'releaseTime'),
            (4, 'workingYears', 0, 'working years'),
            (5, 'degree', u'不限', 'degree'),
            (6, 'requireNum', 1, 'require num'),
            (7, 'category', None, 'category'),
        ]
        for idx, key, fallback, label in field_specs:
            try:
                job_info[key] = detail_items[idx].strong.text
            except Exception as e:
                if fallback is not None:
                    job_info[key] = fallback
                logger.error('get %s failed for %s', label, e)

        # Free-text job description.
        try:
            main_div = soup.find('div', attrs={'class': 'terminalpage-main clearfix'})
            job_info['description'] = main_div.find('div', attrs={'class': 'tab-inner-cont'}).text
        except Exception as e:
            logger.error('get job describe failed for %s', e)

        # Company sidebar: map on-page label text to job_info keys.  Labels
        # are u-prefixed so matching does not depend on the module-level
        # setdefaultencoding hack.
        label_to_key = {
            u'公司规模：': 'scale',
            u'公司性质：': 'entity',
            u'公司行业：': 'industry',
            u'公司主页：': 'companyUrl',
            u'公司地址：': 'address',
        }
        company_soup = None
        try:
            company_soup = soup.find('div', attrs={'class': 'terminalpage-right'}).find('ul', attrs={'class': 'terminal-ul'})
        except Exception:
            company_soup = None
        try:
            # company_soup may be None; the AttributeError is caught below,
            # mirroring the original behaviour.
            for item in company_soup.find_all('li'):
                key = label_to_key.get(item.find('span').text)
                if key is not None:
                    job_info[key] = item.find('strong').text
        except Exception as e:
            logger.error('get company info failed for %s', e)

        return job_info

    def find_job_info_xiaoyuan(self, url):
        """Parse a campus-recruitment ("xiaoyuan") job page into JOB_FORMAT.

        Campus pages use a different layout from regular detail pages, hence
        the separate parser.  Extraction is best-effort as in
        :meth:`find_job_info`.
        """
        job_info = deepcopy(JOB_FORMAT)
        soup = BeautifulSoup(self.get(url))

        # The detail-page URL doubles as the record id.
        job_info['_id'] = url
        job_info['sourceUrl'] = url

        # Company info <ul> blocks; initialized to None so a parse failure
        # degrades into the logged except branches below instead of raising
        # a NameError for an unbound local.
        company_info_soup = None
        try:
            company_info_soup = soup.find('div', attrs={'class': 'cJobDetailInforWrap'}).find_all('ul')
        except Exception as e:
            logger.error('get company info part failed for %s', e)

        # Job title.
        try:
            job_info['jobTitle'] = soup.find('div', attrs={'class': 'cJobDetailInforWrap'}).find('h1').text.strip()
        except Exception as e:
            logger.error('get job title failed for %s', e)

        # Industry / size / ownership from the first <ul>.
        try:
            company_items = company_info_soup[0].find_all('li')
            job_info['industry'] = company_items[3].text
            job_info['scale'] = company_items[5].text
            job_info['entity'] = company_items[7].text
        except Exception as e:
            logger.error('get job company info failed for %s', e)

        # Location / category / headcount / release time from the second <ul>.
        try:
            basic_items = company_info_soup[1].find_all('li')
            job_info['sourceCity'] = basic_items[1].text.strip()
            job_info['category'] = basic_items[3].text.strip()
            job_info['requireNum'] = basic_items[5].text
            job_info['releaseTime'] = basic_items[7].text
        except Exception as e:
            logger.error('get job basic info failed for %s', e)
        # Free-text job description.
        try:
            job_info['description'] = soup.find('div', attrs={'class': 'cJob_Detail'}).p.text
        except Exception as e:
            logger.error('get job description failed for %s', e)
        # Work address from the right-hand sidebar.
        try:
            right_div = soup.find('div', attrs={'class': 'cRight'})
            job_info['address'] = right_div.find('div', attrs={'class': 'cRightTab mt20'}).span.text
        except Exception as e:
            logger.error('get job address failed for %s', e)
        return job_info

    def run(self):
        """Crawl every job for the company; return a list of job_info dicts."""
        jobs_list = self.find_company_jobs_url()
        result = []
        if not jobs_list:
            logger.info('no jobs info in [zhilian] for company %s', self.company_name)
            return result
        for count, job_url in enumerate(jobs_list, 1):
            logger.info('parse No.%d job at url %s', count, job_url)
            # Campus postings carry 'xiaoyuan' in the URL and need the
            # alternate parser.
            if job_url.find('xiaoyuan') == -1:
                result.append(self.find_job_info(job_url))
            else:
                result.append(self.find_job_info_xiaoyuan(job_url))
        return result

if __name__ == '__main__':
    # Ad-hoc smoke run; other company names kept for reference:
    # ZhilianCrawler(u'北京五八信息技术有限公司').find_company_jobs_url()
    # ZhilianCrawler(u'河北东软软件有限公司').run()
    crawler = ZhilianCrawler(u'中国电信集团公司')
    crawler.run()
