# -*- coding: utf-8 -*-
from scpy.logger import get_logger
import os
import sys
import requests
from bs4 import BeautifulSoup
import re
import copy
import datetime
import traceback
import time
import json
from copy import deepcopy
from scpy.request_util import get_random_ua


# Python 2 only: re-expose sys.setdefaultencoding (removed by site.py) so the
# process-wide default codec can be forced to UTF-8 for the Chinese page text.
reload(sys)
sys.setdefaultencoding('utf-8')
logger = get_logger(__file__)

# Directory of this module, with a trailing "/" when not run from its own dir.
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
    CURRENT_PATH = CURRENT_PATH + "/"

# Maximum number of resumes collected per company
MAX_RESUME_NUM = 200
# Maximum number of search-result pages crawled per company
MAX_SEARCH_PAGE = 20
# Resumes requested per search page
MAX_PAGE_SIZE = 20
# Search window in days; the site accepts 30/90/1024
SEARCH_DAY_PERIOD = 1024

# Shared session so cookies persist across search and detail requests.
session = requests.Session()
# searchUrl template placeholders: %(keyword, refreshDaysLimit, pageSize, page)
searchUrl = 'http://www.juxian.com/searchcandidate/list?keywords=%s&employer=&currentIndustries=&jobTitle=&expectationRegions=&minAge=&maxAge=&minEducation=-1&maxEducation=-1&refreshDaysLimit=%d&minWorkYears=&maxWorkYears=&gender=0&expectationIndustries=&resumeLevel=0&size=%d&page=%d'

# Headers for the search-result listing requests.  NOTE: the Cookie value
# contains a '{}' placeholder that crawlerCompany() fills with the fresh
# login cookie via str.format().
searchHeader = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Cookie': '__utma=100925338.474354595.1467804216.1467804216.1467804216.1; __utmc=100925338; __utmz=100925338.1467804216.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; r-c=8e1549c97fd146628e5fa912d05cc2dd;{} .jxtb.auth=lqeJjcRkB1xKJ1OTsNBAPjGY8BpipfgDJQ8/Xx1X7d+m/wH+B29DUSFW6f8meKaFxAkiWNZvtBTjdEw1UNZAj2j14bgJNemC/W91EqJYYhw=; Hm_lvt_2f46e83a37160121350d426532c94e3a=1467804216,1467804225,1467804246,1467804782; Hm_lpvt_2f46e83a37160121350d426532c94e3a=1467872253',
    'Host': 'www.juxian.com',
    'Pragma': 'no-cache',
    'Referer': '',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0',
}

# Headers for the resume detail-page requests (User-Agent rotated per request).
pageHeader = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'zh-CN,zh;q=0.8',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Cookie': '__utma=100925338.474354595.1467804216.1467804216.1467804216.1; __utmc=100925338; __utmz=100925338.1467804216.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; r-c=8e1549c97fd146628e5fa912d05cc2dd; .jxtb.auth.UR3660320741=2016/7/7 14:08:37; .jxtb.auth=lqeJjcRkB1xKJ1OTsNBAPjGY8BpipfgDJQ8/Xx1X7d+m/wH+B29DUSFW6f8meKaFxAkiWNZvtBTjdEw1UNZAj2j14bgJNemC/W91EqJYYhw=; Hm_lvt_2f46e83a37160121350d426532c94e3a=1467804216,1467804225,1467804246,1467804782; Hm_lpvt_2f46e83a37160121350d426532c94e3a=1467872253',
    'Host': 'www.juxian.com',
    'Pragma': 'no-cache',
    # fix: header values must be strings; an int here is rejected by newer
    # versions of requests ("Header value must be str or bytes").
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0',
}



# Default batch of companies to monitor (used by run() when executed directly).
companyList = [
    u'上汽大众汽车有限公司',
    u'杭州誉存科技有限公司',
    u'重庆澜鼎信息技术有限公司',
    u'重庆金易房地产开发（集团）有限公司',
    u'无锡市中卫大药房有限公司',
    # fix: a missing comma here caused implicit string concatenation, fusing
    # this entry with the next one into a single bogus company name.
    u'江门市江磁电工企业有限公司',
    u'重庆万光实业集团有限公司',
    u'重庆钢运置业代理有限公司',
    u'重庆市金牛线缆有限公司',
    u'安投融（北京）网络科技有限公司',
]

"""
  resume trans format
  url
  beginDate
  endDate
  releaseDate
  companyName
  preCompanyName
  NextCompanyName
  detail
  title
  industry
  personalInfo
  jobsExpectInfo
  personalInfo
  educationInfo

"""
def parse_detail(url,headers):
    """
    Fetch one resume detail page and parse it into a structured dict.

    Every field is parsed defensively: a failed sub-parse is logged and the
    field falls back to None instead of aborting the whole resume.

    :param url: absolute URL of the resume detail page
    :param headers: HTTP headers (cookie / User-Agent) for the request
    :return: {'html': bs4 node of the resume body (or None),
              'parse': resume dict with keys releaseDate, url, personalInfo,
                       workingExperienceInfo, jobsExpectInfo, educationInfo}
    """

    # NOTE(review): 'now' is never used below (the commented-out
    # working-years computation was its only consumer).
    now = datetime.datetime.now()

    response = session.get(url, headers=headers)
    # Prefer the html5lib parser; fall back to bs4's default if unavailable.
    try:
        soup = BeautifulSoup(response.content, 'html5lib')
    except:
        soup = BeautifulSoup(response.content)
        error = traceback.format_exc()
        logger.error(u'parse_detail_soup: '+error)

    """
    get html
    """
    # Raw resume body node, returned untouched under the 'html' key.
    try:
        original_all = soup.find('div', attrs={'class':'main-body jianli-option'})
    except:
        original_all = None
        error = traceback.format_exc()
        logger.error(u'original_codes: '+error)

    '''
    release date
    '''
    # Text after the full-width colon in the "resume state" line.
    try:
        release_table = soup.find('p', attrs={'id':'qiuzhi-state'})
        if release_table is not None:
            releaseDate = release_table.find('span').get_text().split(u'：')[1]
        else:
            releaseDate = None
    except:
        releaseDate = None
        error = traceback.format_exc()
        logger.error(u'parse_detail_release_date: '+error)

    '''
    basic info
    '''
    # The <dd> cells of the basic-info table are accessed by fixed position
    # below (0=name, 1=sex, 3=dob, 6=degree, 7=start of work, 9=seek status);
    # assumes the site's layout is stable — each index is try-guarded anyway.
    try:
        basic_table = soup.find('dl', attrs={'id':'rp-basic-info', 'class':'clearfix'})
        if basic_table is not None:
            basicInfo = basic_table.find_all('dd')
        else:
            basicInfo = None
    except:
        basicInfo = None
        error = traceback.format_exc()
        logger.error(u'parse_detail_basicInfo: '+error)

    if basicInfo is not None:
        try:
            name = basicInfo[0].get_text().strip()
        except:
            name = None
            error = traceback.format_exc()
            logger.error(u'parse_detail_name: '+error)

        try:
            sex = basicInfo[1].get_text().strip()
        except:
            sex = None
            error = traceback.format_exc()
            logger.error(u'parse_detail_sex: '+error)

        # Date of birth: site shows u'YYYY年MM月' or u'--' when missing;
        # normalised to 'YYYY-MM-01'.
        try:
            dob = basicInfo[3].get_text().strip()
            if dob != u'--':
                dob = dob.replace(u'年', '-')
                dob = dob.replace(u'月', '-01')
            else:
                dob = None
        except:
            dob = None
            error = traceback.format_exc()
            logger.error(u'parse_detail_dob: '+error)

        try:
            degree = basicInfo[6].get_text().strip()
        except:
            degree = None
            error = traceback.format_exc()
            logger.error(u'parse_detail_degree: '+error)

        # Years of working experience: first run of digits in the cell text.
        try:
            startWorkTime = basicInfo[7].get_text().strip()
            if startWorkTime != '--':
                workingYearPattern = re.compile(u'\d+')
                # NOTE: kept as a string (not cast to int/float).
                workingExperienceYear = re.findall(workingYearPattern,startWorkTime)[0]
                # workingExperienceYear = float(now.year - int(startWorkTime[:4]))
            else:
                workingExperienceYear = None
        except:
            workingExperienceYear = None
            error = traceback.format_exc()
            logger.error(u'parse_detail_startWorkTime: '+error)

        try:
            seekStatus = basicInfo[9].get_text().strip()
        except:
            seekStatus = None
            error = traceback.format_exc()
            logger.error(u'parse_detail_seekStatus: '+error)
    else:
        name = None
        dob = None
        degree = None
        sex = None
        workingExperienceYear = None
        seekStatus = None

    '''
    work info
    '''
    # One 'jianliInformation job' div per work experience.
    workingExperienceInfo = []
    try:
        work_table = soup.find_all('div', attrs={'class':'jianliInformation job'})
    except:
        work_table = None
        error = traceback.format_exc()
        logger.error(u'work_table: '+error)

    if work_table is not None and work_table != []:
        for part in work_table:

            # <h3> holds the company name (<em>) and the period (<span>).
            try:
                time_comp = part.find('h3')
            except:
                time_comp = None
                error = traceback.format_exc()
                logger.error(u'time_comp: '+error)
            if time_comp is not None:
                try:
                    companyName = time_comp.find('em').get_text().strip()
                except:
                    companyName = None
                    error = traceback.format_exc()
                    logger.error(u'companyName: '+error)

                # Period looks like 'YYYY.MM - YYYY.MM' (or u'至今' = "until
                # now"); dates are normalised to 'YYYY-MM-01'.  An open-ended
                # job is closed at the resume's release date when available.
                try:
                    workTime = time_comp.find('span').get_text().strip()
                    workTime = workTime.split(u'-')
                    if workTime[0].strip() == u'至今':
                        beginDate = str(datetime.datetime.now().date())
                    else:
                        beginDate = workTime[0].replace('.', '-').strip() + '-01'
                    if workTime[1].strip() == u'至今' and releaseDate:
                        endDate = releaseDate
                    else:
                        endDate = workTime[1].replace('.', '-').strip() + '-01'
                except:
                    beginDate = None
                    endDate = None
                    error = traceback.format_exc()
                    logger.error(u'work_table_workDate: '+error)
            else:
                companyName = None
                beginDate = None
                endDate = None

            # Company industry/scale: zip the <dt> labels with the <dd>
            # values; only the u'公司行业：' ("company industry") entry is used.
            inds_dic = {}
            try:
                indus_scale_table = part.find('dl',attrs={'class':'rp-company-intro'})
            except:
                indus_scale_table = None
                error = traceback.format_exc()
                logger.error(u'indus_scale_table: '+error)
            try:
                if indus_scale_table is not None:
                    indus_dt1 = indus_scale_table.find_all('dt')
                    indus_dt = []
                    for indt in indus_dt1:
                        indus_dt.append(indt.get_text().strip())

                    indus_dd1 = indus_scale_table.find_all('dd')
                    indus_dd = []
                    for indd in indus_dd1:
                        indus_dd.append(indd.get_text().strip())

                    for i in indus_dt:
                        inds_dic.update({i:indus_dd[indus_dt.index(i)]})

                    if not inds_dic.has_key(u'公司行业：'):
                        inds_dic[u'公司行业：'] = None
                else:
                    inds_dic[u'公司行业：'] = None
            except:
                inds_dic[u'公司行业：'] = None
                error = traceback.format_exc()
                logger.error(u'work_table_indus: '+error)

            # Job title: text after the full-width colon, with any trailing
            # full-width-parenthesised suffix stripped off.
            try:
                title_table = part.find('h4')
                if title_table is not None:
                    title_table = title_table.get_text().split(u'：')[1]
                    title = title_table.split(u'（')[0].strip()
                else:
                    title = None
            except:
                title = None
                error = traceback.format_exc()
                logger.error(u'title: '+error)

            # Job description: same dt/dd zipping, keeping only the
            # u'工作内容：' ("job content") entry.
            # NOTE(review): find_all('dl')[0] looks like it can pick up the
            # same 'rp-company-intro' dl parsed above — confirm against the
            # live page markup.
            detail_dic = {}
            try:
                detail_table = part.find_all('dl')[0]
            except:
                detail_table = None
                error = traceback.format_exc()
                logger.error(u'detail_table: '+error)
            try:
                if detail_table is not None:
                    detail_dt1 = detail_table.find_all('dt')
                    detail_dt = []
                    for dett in detail_dt1:
                        detail_dt.append(dett.get_text().strip())

                    detail_dd1 = detail_table.find_all('dd')
                    detail_dd = []
                    for detd in detail_dd1:
                        detail_dd.append(detd.get_text().strip())

                    for i in detail_dt:
                        detail_dic.update({i:detail_dd[detail_dt.index(i)]})
                        # print 'detail',json.dumps(detail_dic, ensure_ascii=False, indent=4)

                    if not detail_dic.has_key(u'工作内容：'):
                        detail_dic[u'工作内容：'] = None
                else:
                    detail_dic[u'工作内容：'] = None
            except:
                detail_dic[u'工作内容：'] = None

            workingExperienceInfo.append(
                {
                    'companyName':companyName,
                    'title':title,
                    'beginDate':beginDate,
                    'endDate':endDate,
                    'industry':inds_dic[u'公司行业：'],
                    'detail':detail_dic[u'工作内容：']
                }
            )
    else:
        workingExperienceInfo = None

    '''
    education info
    '''
    # One 'jianliInformation education' div per school attended.
    educationInfo = []
    try:
        edu_table = soup.find_all('div', attrs={'class':'jianliInformation education'})
    except:
        edu_table = None
        error = traceback.format_exc()
        logger.error(u'edu_table: '+error)

    if edu_table is not None and edu_table != []:
        for each in edu_table:
            try:
                eduTime = each.find('span').get_text().strip()
            except:
                eduTime = None
                error = traceback.format_exc()
                logger.error(u'edu_table_eduTime: '+error)
            # Same 'YYYY.MM - YYYY.MM' / u'至今' normalisation as work dates,
            # but open-ended periods are closed at today's date.
            try:
                if eduTime is not None:
                    beginDate = eduTime.split('-')[0]
                    if beginDate.strip() == u'至今':
                        beginDate = str(datetime.datetime.now().date())
                    else:
                        beginDate = beginDate.replace('.', '-').strip() + '-01'

                    endDate = eduTime.split('-')[1].strip()
                    if endDate.strip() == u'至今':
                        endDate = str(datetime.datetime.now().date())
                    else:
                        endDate = endDate.replace('.', '-').strip() + '-01'
                else:
                    beginDate = None
                    endDate = None
            except:
                beginDate = None
                endDate = None
                error = traceback.format_exc()
                logger.error(u'edu_table_time: '+error)

            # The <em> cells are positional: 0=school, 1=degree, 2=major.
            try:
                edu_school = each.find_all('em')
            except:
                edu_school = None
                error = traceback.format_exc()
                logger.error(u'edu_table_edu_school: '+error)

            if edu_school is not None:
                try:
                    school = edu_school[0].get_text().strip()
                except:
                    school = None
                    error = traceback.format_exc()
                    logger.error(u'edu_table_school: '+error)

                try:
                    # NOTE(review): this overwrites the 'degree' parsed from
                    # basic info; only the per-school value ends up in
                    # educationInfo, but personalInfo['degree'] below gets
                    # the LAST school's degree, not the basic-info one.
                    degree = edu_school[1].get_text().strip()
                except:
                    degree = None
                    error = traceback.format_exc()
                    logger.error(u'edu_table_degree: '+error)

                try:
                    major = edu_school[2].get_text().strip()
                except:
                    major = None
                    error = traceback.format_exc()
                    logger.error(u'edu_table_major: '+error)
            else:
                school = None
                degree = None
                major = None

            educationInfo.append({'school':school, 'beginDate':beginDate, 'endDate':endDate, 'degree':degree, 'major':major})
    else:
        educationInfo = None

    '''
    job expectation
    '''
    try:
        jobExpect_table = soup.find('div', attrs={'class':'jianliInformation jobIntention'})
    except:
        jobExpect_table = None
        error = traceback.format_exc()
        logger.error(u'job_expect_table: '+error)

    if jobExpect_table is not None:
        job_dd1 = jobExpect_table.find_all('dd')
        job_dd = []
        for each in job_dd1:
            job_dd.append(each.get_text().strip())
        # print 'dd',json.dumps(dd, ensure_ascii=False, indent=4)

        job_dt1 = jobExpect_table.find_all('dt')
        job_dt = []
        for each in job_dt1:
            job_dt.append(each.get_text().strip())
        # print 'job_dt', json.dumps(job_dt, ensure_ascii=False, indent=4)

        jobExpect_dic = {}
        for i in job_dt:
            jobExpect_dic.update({i:job_dd[job_dt.index(i)]})
        # print json.dumps(jobExpect_dic, ensure_ascii=False, indent=4)

        # Expected salary u'期望月薪：': first run of digits, as a float.
        if jobExpect_dic.has_key(u'期望月薪：'):
            try:
                jobExpect_dic[u'期望月薪：'] = float(re.findall(u'(\d+)', jobExpect_dic[u'期望月薪：'])[0])
            except:
                jobExpect_dic[u'期望月薪：'] = None
                error = traceback.format_exc()
                logger.error(u'jobExpect_dic_expectSalary: '+error)
        # NOTE(review): redundant elif — a plain 'else' would be equivalent.
        elif not jobExpect_dic.has_key(u'期望月薪：'):
            jobExpect_dic[u'期望月薪：'] = None

        # Default the remaining expectation keys (city/industry/position).
        if not jobExpect_dic.has_key(u'期望城市：'):
            jobExpect_dic[u'期望城市：'] = None

        if not jobExpect_dic.has_key(u'期望行业：'):
            jobExpect_dic[u'期望行业：'] = None

        if not jobExpect_dic.has_key(u'期望职位：'):
            jobExpect_dic[u'期望职位：'] = None

        jobsExpectInfo = {
                'targetArea':jobExpect_dic[u'期望城市：'],
                'targetIndustry':jobExpect_dic[u'期望行业：'],
                'targetPosition':jobExpect_dic[u'期望职位：'],
                'seekStatus':seekStatus,
                'estArrivalTime':None,
                'expSalary':jobExpect_dic[u'期望月薪：'],
                'currentStatus':None
            }
        # print json.dumps(jobsExpectInfo, ensure_ascii=False, indent=4)
    else:
        jobsExpectInfo = None

    '''
    combine resume result
    '''
    resumeInfo = {
        'releaseDate':releaseDate,
        'url':url,
        'personalInfo':{
            'name':name,
            'degree':degree,
            'dob':dob,
            'sex':sex,
            'workingExperienceYear':workingExperienceYear,
            'location':None,
            'registerLocation':None
        },
        'workingExperienceInfo':workingExperienceInfo,
        'jobsExpectInfo':jobsExpectInfo,
        'educationInfo':educationInfo,
    }
    # Normalise empty lists to None for consistency with the not-found paths.
    if resumeInfo['workingExperienceInfo'] == []:
        resumeInfo['workingExperienceInfo'] = None
    if resumeInfo['educationInfo'] == []:
        resumeInfo['educationInfo'] = None

    return {'html':original_all, 'parse':resumeInfo}



def crawlerCompany(companyName):
    """
    Crawl up to MAX_RESUME_NUM resumes mentioning ``companyName``.

    Pages through the juxian.com candidate search (at most MAX_SEARCH_PAGE
    pages of MAX_PAGE_SIZE hits each), follows every hit to its detail page
    and parses it with parse_detail().  Exits the whole process (via
    check_login) when the session is no longer authenticated.

    :param companyName: unicode company name used as the search keyword
    :return: list of parsed resume dicts (parse_detail()['parse'] values)
    """
    resumeList = []
    # NOTE(review): parse_content and html_content are accumulated but never
    # returned or stored anywhere visible in this file — kept for parity.
    parse_content = {}
    html_content = []
    # Extracts the relative resume URL from an onclick="...('URL')" attribute.
    urlPattern = re.compile(u'\(\'(.*)\'\)')

    # Log in once per crawl and fill the '{}' placeholder on a COPY of the
    # header template.  The previous code format()-ed the module-level
    # searchHeader in place, which destroyed the placeholder after the first
    # call and silently reused a stale login cookie for every later company.
    cookieValue = searchHeader['Cookie'].format(login_cookies())

    for page in range(1, MAX_SEARCH_PAGE+1):
        pageUrl = searchUrl%(companyName,SEARCH_DAY_PERIOD,MAX_PAGE_SIZE,page)
        headers = dict(searchHeader)
        headers['User-Agent'] = get_random_ua()  # rotate UA per request
        headers['Cookie'] = cookieValue
        response = session.get(pageUrl, headers=headers)
        if response.status_code == 200:
            try:
                soup = BeautifulSoup(response.content,'html5lib')
            except:
                soup = BeautifulSoup(response.content)
            check_login(soup)
            url_table = soup.find_all('li',{'class':'listview-jinali'})
            if url_table is not None:
                if len(url_table) == 0:
                    logger.info('companyName: %s -> page %d: no resume displayed'%(companyName, page))
                    break
                else:
                    # Build absolute detail-page URLs for every search hit.
                    li_list = []
                    for urlItem in url_table:
                        urlString = urlItem.attrs['onclick']
                        urlResume = re.findall(urlPattern,urlString)[0]
                        li_list.append('http://www.juxian.com'+urlResume)

                    for no, url in enumerate(li_list, 1):
                        # rotate UA per detail request
                        pageHeader['User-Agent'] = get_random_ua()
                        # Index the result dict explicitly.  The previous
                        # code unpacked .values(), whose order is undefined
                        # for a plain dict, so 'resume' and 'html' could be
                        # swapped at random.
                        detail = parse_detail(url, headers=pageHeader)
                        html = detail['html']
                        resume = detail['parse']
                        if html is not None:
                            html_each = {'url': str(url), 'html':str(html), 'createdAt':str(datetime.datetime.now().date())}
                            if html_each not in html_content:
                                html_content.append(html_each)

                        if resume not in resumeList:
                            resumeList.append(resume)
                        time.sleep(5)  # throttle: be polite to the site
                        logger.info('companyName: %s -> page: %d, no.: %d'%(companyName, page, no))
                    if len(li_list) < MAX_PAGE_SIZE:
                        logger.info('companyName: %s -> page: %d less than %d resume parse breaking!'%(companyName, page, MAX_PAGE_SIZE))
                        break
                    if len(resumeList) > MAX_RESUME_NUM:
                        logger.info('enough resume num for %s!'%companyName)
                        break
            else:
                logger.info('companyName: %s -> page %d: url_table is None'%(companyName, page))
                break
        time.sleep(5)

    # 'YYYY-MM' tag of the crawl month; strftime zero-pads the month.
    period = datetime.datetime.now().strftime('%Y-%m')

    if resumeList != []:
        parse_content.update(
            {
                'companyName': companyName,
                'jsonData':resumeList,
                'createdAt':str(datetime.datetime.now().date()),
                'period':period
            }
        )
    return resumeList

def check_login(soup):
    """Terminate the whole process when the search page shows we are logged out."""
    # A truthy '#listview' node means the session is still authenticated.
    if soup.select_one("#listview"):
        return
    logger.info('page login failed')
    sys.exit()

def login_cookies():
    """
    Sign in to juxian.com and return the response cookies.

    :return: cookies serialised as a 'name=value;' string (no spaces),
             matching the format spliced into searchHeader's Cookie template.
    """
    signin_url = 'http://www.juxian.com/account/signin'
    payload = {
        'email': 'chi.che@socialcredits.cn',
        'password': 'jy03055084',
        'ProfileType': 1,
        'returnUrl': '',
    }
    response = requests.post(
        signin_url,
        data=payload,
        headers={'User-Agent': get_random_ua()},
    )
    return ''.join('%s=%s;' % (name, value) for name, value in response.cookies.items())



def save(resumeList, companyName):
    """
    Flatten parsed resumes into one record per work experience.

    Each record carries the resume-level fields plus one job's company,
    dates, title, industry and description, and the names of the adjacent
    jobs (resumes list jobs newest-first, so the *next* company is at
    index i-1 and the *previous* one at i+1).

    :param resumeList: list of dicts produced by parse_detail()['parse']
    :param companyName: company being monitored (used for logging only)
    :return: list of flattened record dicts (possibly partial on error)
    """
    result = list()
    try:
        for count, resumeItem in enumerate(resumeList, 1):
            logger.info('saving No.%d resume for company %s'%(count, companyName))
            saveDataBase = {
              # drop the '?keywords=...' suffix so the url is a stable id
              'url': resumeItem.get('url','').split('?keywords')[0],
              'beginDate':'',
              'endDate':'',
              'releaseDate':resumeItem.get('releaseDate'),
              'companyName':'',
              'preCompanyName': None,
              'NextCompanyName': None,
              'detail':'',
              'jobTitle':'',
              'industry':'',
              'personalInfo':resumeItem.get('personalInfo',{}),
              'jobsExpectInfo':resumeItem.get('jobsExpectInfo',{}),
              'educationInfo':resumeItem.get('educationInfo',{})
            }
            # fix: parse_detail() returns None (not []) when there is no work
            # history; 'or []' avoids len(None)/indexing crashes.
            work_list = resumeItem.get('workingExperienceInfo') or []
            total = len(work_list)
            for i in range(total):
                saveData = deepcopy(saveDataBase)
                workingCompany = work_list[i]
                nextCompanyIndex = i - 1
                preCompanyIndex = i + 1
                if 0 <= nextCompanyIndex < total:
                    nextCompanyName = work_list[nextCompanyIndex].get('companyName')
                else:
                    nextCompanyName = None
                if 0 <= preCompanyIndex < total:
                    preCompanyName = work_list[preCompanyIndex].get('companyName')
                else:
                    preCompanyName = None
                saveData['companyName'] = workingCompany.get('companyName', '')
                saveData['beginDate'] = workingCompany.get('beginDate', '')
                saveData['endDate'] = workingCompany.get('endDate', '')
                saveData['jobTitle'] = workingCompany.get('title', '')
                saveData['industry'] = workingCompany.get('industry', '')
                saveData['detail'] = workingCompany.get('detail', '')
                saveData['preCompanyName'] = preCompanyName
                # fix: the base dict declares 'NextCompanyName' but the old
                # code only wrote a lowercase duplicate, leaving the declared
                # key permanently None.  Write both so readers of either
                # spelling keep working.
                saveData['NextCompanyName'] = nextCompanyName
                saveData['nextCompanyName'] = nextCompanyName
                result.append(saveData)
        return result
    except Exception as e:
        # 'as e' is valid on both Python 2.6+ and Python 3 (the old
        # 'except Exception,e' form is Python-2-only syntax).
        logger.error('saving %s resume failed for %s'%(companyName, str(e)))
        return result

def run(companyList):
    """Crawl and flatten resumes for every company in ``companyList``."""
    for count, companyName in enumerate(companyList, 1):
        logger.info('parse No.%d company %s'%(count, companyName))
        save(crawlerCompany(companyName), companyName)


def run_single_company(companyName):
    """Crawl juxian.com resumes for one company and return the parsed list."""
    logger.info('searching %s resume from juxian.com'%companyName)
    return crawlerCompany(companyName)



if __name__ == '__main__':
    # Ad-hoc manual entry point; earlier trial companies kept commented out.
    # # companyName = u'上海协大国际贸易有限公司合肥分公司'
    # # companyName = u'艾达索高新材料无锡有限公司'
    # # companyName =u'上海高锵石油机械设备有限公司'
    # # companyName = u'深圳广联赛讯有限公司'
    companyName = u'TCL通讯惠州研发中心'
    # companyName = u'宏远控股集团'
    # # companyName = u'深圳清溢光电股份有限公司'
    # companyName = u'杭州誉存科技有限公司'
    # NOTE(review): this reassignment makes the TCL value above dead code —
    # only the company below is actually crawled.
    companyName = u'北京京宝融信息科技有限公司'
    run_single_company(companyName)
    # resultDic = crawlerCompany(companyName)
    # print json.dumps(resultDic,ensure_ascii=False,indent=1)
    # login_cookies()
    # run(companyList)