# -*- coding: utf-8 -*-
from scpy.logger import get_logger
import os
import sys
import requests
from bs4 import BeautifulSoup
import re
import copy
import datetime
import traceback
import time
import json
from copy import deepcopy
from scpy.request_util import get_random_ua

# from M2Crypto import RSA
# import rsa



# Python 2 environment hack: reload sys to restore setdefaultencoding(),
# then force the process-wide default codec to UTF-8 so implicit
# str<->unicode conversions of Chinese text do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

logger = get_logger(__file__)

# Directory containing this module, normalized to carry a trailing slash
# when non-empty (empty when run from the module's own directory).
CURRENT_PATH = os.path.dirname(__file__)
if CURRENT_PATH:
    CURRENT_PATH = CURRENT_PATH + "/"
# Shared request headers. NOTE(review): a single random User-Agent is drawn
# once at import time and reused for every request in this process.
header = {
        'User-Agent':get_random_ua()
        }
# session = requests.Session()

# def test():
#     data = {
#         'name': '15823909043',
#         'pw': '7e1ef36e810141e24c368e70731ac05caacbe9b14658bd08993c58fac9383345a568aa56e9c158d923efe7a2e4fbd15f7fbcf657b75560d88fe00ead3cc7bfbd4c54b3b77965650a7b78db223e1b52ace3832221737c93c80b456660e97a02f5588a7d5085be7e3ea2b0c7579e6ff8947cddbefd694fe2e7837784786ff93be9',
#         'code': '',
#         'rem_l': '1',
#         'precode': '1',
#         'invokeSign': '',
#         'lt': ''
#     }
#     url_login = 'http://passport.chinahr.com/qy/buser/login'
#     url = 'http://passport.chinahr.com/qy/buser/subLogin'
#     response_origin = session.get(url_login)
#     soup_origin = BeautifulSoup(response_origin.content)
#     data['invokeSign'] = soup_origin.find('input',attrs={'id':'login_sign'}).attrs['value']
#     data['lt'] = soup_origin.find('input',attrs={'id':'lt'}).attrs['value']
#     response = session.post(url, data=data)
#     if response.status_code == 200:
#         pass
#     else:
#         pass
#     res = session.get("http://qy.chinahr.com/cv/sou/?keyword=%E6%9D%AD%E5%B7%9E%E8%AA%89%E5%AD%98%E7%A7%91%E6%8A%80%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8&live=37%2C401&sortType=1")
#     print res.status_code ,res.content


class ChinaHrCralwer(object):
    def __init__(self, companyName):
        self.companyName = companyName
        self.session = requests.Session()
        self.login()

    def run(self):
        resume_list = list()
        search_url ='http://qy.chinahr.com/cv/sou/'
        count = 1
        params = {
            'keyword':self.companyName,
            'page': count
        }

        response = self.session.get(search_url, params=params, headers=header)
        soup = BeautifulSoup(response.content)
        link_list = soup.find_all('a',attrs={'onclick': "clickButtonLog('from=chr_com_search_list');"})
        for item in link_list:
            resume_url = item.attrs['href']
            resume_list.append(self.parse_resume(resume_url))
        return resume_list

    def parse_resume(self, url):
        resume = dict()
        soup = BeautifulSoup(self.session.get(url).content)
        # releaseDate
        try:
            resume['releaseDate'] = soup.find('div',attrs={'class':'update'}).text.split('：')[1].strip()
        except Exception, e:
            logger.error('parse releaseDate failed for %s'%str(e))
            resume['releaseDate'] = datetime.datetime.now().strftime('%Y-%m-%d')

        # personal info
        resume['personalInfo'] = {
              "name": "",
              "degree": "",
              "workingExperienceYear": "",
              "dob": "",
              "registerLocation": "",
              "sex": "",
              "location": ""
        }
        try:
            # name
            resume['personalInfo']['name'] = soup.find('div',attrs={'class':'wz-name'}).text.strip()
        except Exception, e:
            logger.error('parse name failed for %s'%str(e))
        try:
            # personal info list
            soup_person = soup.find('div', attrs={'class': 'lfInBase'}).find('ul').find_all('li')[0].find_all('em')
            resume['personalInfo']['sex'] = soup_person[0].text
            age = int(re.findall('\d+', soup_person[1].text)[0])
            resume['personalInfo']['dob'] = (datetime.datetime.now()-datetime.timedelta(days=365*age)).strftime('%Y-%m-%d')
            resume['personalInfo']['location'] = soup_person[2]
        except Exception, e:
            logger.error('parse personal info failed for %s'%str(e))
        try:
            soup_person_1 = soup.find('div', attrs={'class': 'lfInBase'}).find('ul').find_all('li')[1].find_all('em')
            resume['personalInfo']['degree'] = soup_person_1[0].text
            resume['personalInfo']['workingExperienceYear'] = re.findall('\d+', soup_person_1[1].text)[0]
        except Exception, e:
            logger.error('parse personal info failed for %s'%str(e))

        # workingExperience info
        resume['workingExperienceInfo'] = list()
        try:
            soup_working_set = soup.find('div', attrs={'class': 'inforWork'}).find_all('div', attrs={'class': 'ctInfor'})
            for soup_item in soup_working_set:
                working_item = {
                               "endDate": "",
                               "title": "",
                               "beginDate": "",
                               "companyName": "",
                               "detail": "",
                               "industry": ""
                            }
                working_item['title'] = soup_item.find('div', attrs={'class': "tilEdit"}).text
                working_item['detail'] = soup_item.find('div', attrs={'class': "conJob"}).text
                time_text = soup_item.find('div', attrs={'class':"jobTime"}).text
                working_item['beginDate'], working_item['endDate'] = self.parse_time(time_text)
                soup_com_set = soup_item.find('div', attrs={'class':'conCom'}).find_all('p')
                working_item['companyName'] = re.subn(u'（.*?）','',soup_com_set[0].find_all('em')[0])[0]
                working_item['industry'] = soup_com_set[1].split('：')[1]
                resume['workingExperienceInfo'].append(working_item)
        except Exception, e:
            logger.error('parse working experience info failed for %s'%str(e))

        # education info
        resume['educationInfo'] = list()
        try:
            soup_edu_set = soup.find('div', attrs={'class': "edu-mr"}).find_all('div',attrs={'class':'inforWork'})
            for soup_item in soup_edu_set:
                edu_item = {
                   "school": "",
                   "endDate": "",
                   "beginDate": "",
                   "degree": "",
                   "major": ""
                }
                edu_item['school'] = soup_item.find('div', attrs={'class':'tilEdit'})
                edu_item['degree'] = soup_item.find('div', attrs={'class':'conCom'}).find_all('em')[0].text
                edu_item['major'] = soup_item.find('div', attrs={'class':'conCom'}).find_all('em')[1].text
                time_text = soup_item.find('div', attrs={'class':"jobTime"}).text
                edu_item['beginDate'], edu_item['endDate'] = self.parse_time(time_text)
                resume['educationInfo'].append(edu_item)
        except Exception, e:
            logger.error('parse education experience info failed for %s'%str(e))

        # seeking status
        resume["jobsExpectInfo"] = {
                          "expSalary": "",
                          "targetIndustry": "",
                          "estArrivalTime": "",
                          "targetPosition": "",
                          "currentStatus": "",
                          "seekStatus": "",
                          "targetArea": ""
                         }
        try:
            soup_seeking = soup.find('div', attrs={'class': "inten-mr"}).find_all('div',attrs={'class':'boxInInten01'})
            for soup_item in soup_seeking:
                key = soup_item.find('span').text
                if key == u'求职性质：':
                    resume["jobsExpectInfo"]["seekStatus"] = soup_item.find('em').strip()
                if key == u'目前工作状态：':
                    resume["jobsExpectInfo"]["currentStatus"] = soup_item.find('em').strip()
                if key == u'期望地点：':
                    resume["jobsExpectInfo"]["targetArea"] = soup_item.find('em').strip()
                if key == u'期望薪水：':
                    resume["jobsExpectInfo"]["expSalary"] = soup_item.find('em').strip()
        except Exception, e:
            logger.error('parse seeking status info failed for %s'%str(e))

        print soup

    def parse_time(self, text):
        begin_time = ''
        end_time = ''
        try:
            time_list = text.split('-')
            if len(time_list) == 1:
                end_time = datetime.datetime.strptime(time_list[0].strip(),'%Y.%m').strftime('%Y-%m-%d')
                begin_time = ''
            elif len(time_list) == 2:
                begin_time = datetime.datetime.strptime(time_list[0].strip(), '%Y.%m').strftime('%Y-%m-%d')
                end_time = datetime.datetime.strptime(time_list[1].strip(), '%Y.%m').strftime('%Y-%m-%d')
            else:
                begin_time = ''
                end_time = ''
            return begin_time, end_time
        except Exception, e:
            logger.error('parse time text failed for %s'%str(e))
            return begin_time, end_time

    def login(self):
        data = {
            'name': '15823909043',
            'pw': 'a234d72d879ddb5c32b66ece0dc4c57caeee1696d9b706e1813be9b948eef6ff34a9d6c0ed03909d5fa3e372ac715e829fc70177491c9ccde2a79601831428d063427a310d30f2a067d42a85aad7fe791051bf6a7482e6ec1ef5fa9700ebf8c6a4a153906279dc8a79734e9bf719c13049ba2addbc36251de274dcf0c8880cdf',
            'code': '',
            'rem_l': '1',
            'precode': '1',
            'invokeSign': '',
            'lt': ''
        }
        url_login = 'http://passport.chinahr.com/qy/buser/login'
        url = 'http://passport.chinahr.com/qy/buser/subLogin'
        response_origin = self.session.get(url_login)
        soup_origin = BeautifulSoup(response_origin.content)
        data['invokeSign'] = soup_origin.find('input', attrs={'id': 'login_sign'}).attrs['value']
        data['lt'] = soup_origin.find('input', attrs={'id': 'lt'}).attrs['value']
        response = self.session.post(url, data=data, headers=header)
        soup = BeautifulSoup(response.content)
        # modulus = soup.find('input', attrs={'id':'modulus'}).attrs['value']
        # exponent = soup.find('input', attrs={'id':'exponent'}).attrs['value']

        if response.status_code != 200:
            logger.warn('china hr login failed!')
            sys.exit(1)
        else:
            logger.info('china hr login success!')


if __name__ == '__main__':
    # Demo of the login-form RSA parameters: modulus n and public exponent
    # e = 0x10001, used to encrypt one sample character.
    modulus = '00b98f37f2d782e60872322c4a0447fc4a4442649e3857c737d5d56986e0774d6ee67b9bc894dc4c2523c150be856e1ab8782a309a79920948c1fcac79345d0150bd202218368696cfe2fd8148ea910877798be8dd02509ac66f2080a098775cda27acef0e06a6fc3eec0bb5291e31b9cabee08e046060c53432d87ddaa3a0d571'
    n = int(modulus, base=16)
    e = int('010001', base=16)
    value = ord('j')
    # PERF FIX: three-argument pow() does modular exponentiation directly;
    # `value ** e % n` first built a ~132,000-digit intermediate integer.
    trans = hex(pow(value, e, n))
    # test()
    ChinaHrCralwer(u'重庆猪八戒网络有限公司').run()