# -*- coding: utf-8 -*-
import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')
sys.path.append('../')

from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from tech_keyword import tech_keywords
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from selenium import webdriver
from common import new_driver
from common import identifier
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

import numpy as np
import requests
import datetime
import hashlib
import pymongo
import random
import math
import codecs
import base64
import json
import pprint
import time
import ssl
import os
import re


class GANJI():
    """Crawler that harvests resumes from ganji.com's hrvip resume library.

    On construction it connects to MongoDB at *net*, opens a visible Chrome
    window, logs in, then walks keyword x city x page search-result lists and
    stores each previously-unseen resume as a document in
    ``qushu_resume_db.qushu_resume_coll``.
    """

    def __init__(self, keywords, net):
        """Set up storage and the browser, then immediately run the crawl.

        Args:
            keywords: iterable of search keywords to crawl.
            net: MongoDB host address (port 27017 is assumed).
        """
        print(net)
        # connect=False defers the actual connection, keeping the client
        # fork-safe when this class is driven from multiprocessing workers.
        client = MongoClient(net, 27017, connect = False)

        self.qushu_resume_db = client['qushu_resume_db']
        self.qushu_resume_coll = self.qushu_resume_db['qushu_resume_coll']

        # headless=0: keep the window visible — login may need manual help.
        self.driver = new_driver(headless = 0)
        self.keywords = keywords
        # Remember the search-results window; resume detail pages open in
        # new tabs and we must switch back here after each one.
        self.main_window = self.driver.current_window_handle

        self.login()
        self.start()

    def url2base64(self, picUrl):
        """Download the image at *picUrl* and return its base64-encoded bytes."""
        with request.urlopen(picUrl) as web:
            return base64.b64encode(web.read())

    def CalcSign(self, s):
        """Return the hex MD5 digest of *s* (signature for the captcha API)."""
        hl = hashlib.md5()
        hl.update(s.encode(encoding = 'utf-8'))

        return hl.hexdigest()

    def get_code(self, picUrl):
        """Submit the captcha image at *picUrl* to the fateadm.com solver.

        Returns:
            The recognized captcha text, or False when the API reports an
            error (RetCode != '0').

        NOTE(review): the user id and API key are hard-coded below; consider
        moving them to configuration.
        """
        tm = str(int(time.time()))
        img_data = self.url2base64(picUrl)
        rsp = requests.post('http://pred.fateadm.com/api/capreg', data = {
            'user_id': '103595',
            'timestamp': tm,
            # sign = md5(user_id + timestamp + md5(timestamp + api_key))
            'sign': self.CalcSign('103595' + tm + self.CalcSign(tm + 'qn4iUtbRoSU95do6zEglMUmOLIjkNIiQ')),
            'predict_type': '30400',
            'img_data': img_data
        }).json()

        if rsp['RetCode'] == '0':
            return json.loads(rsp['RspData'])['result']
        else:
            return False

    def login(self):
        """Open the ganji login page and type the (hard-coded) credentials.

        The automated submit/captcha flow is commented out below; the final
        10-second sleep presumably leaves time to complete the login manually
        in the visible browser window — TODO confirm.

        NOTE(review): retries forever on any page-load error via unbounded
        recursion, and the bare ``except`` also swallows KeyboardInterrupt.
        """
        try:
            self.driver.get('https://passport.ganji.com/login.php?next=/')
        except:
            return self.login()

        time.sleep(2)

        self.driver.find_element_by_css_selector('.loginPanel .usename').send_keys('杭州趋数')
        self.driver.find_element_by_css_selector('.loginPanel .usepassword').send_keys('123456aaa')

        time.sleep(10)
        # self.driver.find_element_by_css_selector('.loginPanel .submit').click()
        #
        # time.sleep(2)
        #
        # # Captcha solving
        # if self.driver.find_element_by_css_selector('.login-img-checkcode'):
        #     img_src = self.driver.find_element_by_css_selector('.login-img-checkcode').get_attribute('src')
        #
        #     if img_src:
        #         code = self.get_code(img_src)
        #         print(code)
        #         self.driver.find_element_by_css_selector('#login_checkcode_input').send_keys(code)
        #         time.sleep(2)
        #
        # self.driver.find_element_by_css_selector('.loginPanel .submit').click()
        # self.driver.get('http://hrvip.ganji.com/resume_library/search_resume?_rid=0.8409482759480216')

    def start(self):
        """Iterate keyword x city x page (1..9) and crawl each result list.

        Paging for a (keyword, city) pair stops early as soon as
        crawl_resume_list reports an empty page (returns a falsy value).
        """
        for keyword in self.keywords:
            # Hard-coded ganji city ids to search in.
            for city_id in ['12', '13', '14', '15', '16', '17', '24', '26', '45', '55', '56', '65', '67', '93', '103', '113', '114', '123', '176', '194', '204']:
                for pageNo in range(1, 10):
                    url = 'http://hrvip.ganji.com/resume_library/search_resume/?category=-1&major=&tag=&city_id=' + city_id + '&district_id=-1&street_id=-1&sex=&degree=&date=&age=&age_start=&age_end=&period=&price=&parttime_price=&key=' + keyword + '&related=0&page=' + str(pageNo) + '&_rid=0.1700687275210666'

                    ret = self.crawl_resume_list(url)
                    if not ret:
                        break

    def crawl_resume_list(self, url):
        """Load one search-result page and crawl every new resume on it.

        Returns True if the page had any result rows (keep paging), False
        when it had none (caller stops paging this keyword/city pair).

        NOTE(review): retries forever on page-load errors via unbounded
        recursion, same pattern as login().
        """
        # Always start from the main results window; detail tabs may still
        # be in focus from a previous iteration.
        self.driver.switch_to_window(self.main_window)

        try:
            self.driver.get(url)
        except:
            return self.crawl_resume_list(url)

        time.sleep(5)

        if len(self.driver.find_elements_by_css_selector('.frm-search .bor-bot')) == 0:
            return False

        for item in self.driver.find_elements_by_css_selector('.frm-search .bor-bot'):
            name = item.find_element_by_css_selector('.name04').text
            edu = item.find_element_by_css_selector('.education04').text
            sex = item.find_element_by_css_selector('.gender04').text
            update = item.find_element_by_css_selector('.update-time04').text

            # NOTE(review): hard-coded year filter — only resumes whose
            # update time mentions 2018 are crawled; stale after 2018.
            if not '2018' in update:
                continue

            # Dedup key; NOTE(review): this local shadows the `identifier`
            # imported from common at the top of the file.
            identifier = name + '|' + edu + '|' + sex

            if self.qushu_resume_coll.find_one({'identifier': identifier}):
                continue

            # Clicking opens the resume detail page in a new tab.
            item.find_element_by_css_selector('.job-position04 a').click()
            time.sleep(1)

            self.crawl_resume_info()
            self.driver.switch_to_window(self.main_window)

        return True

    def crawl_resume_info(self):
        """Scrape the resume detail tab, store it in Mongo, close the tab.

        Switches to the first window handle that is not the main results
        window, parses the page with BeautifulSoup, assembles a resume
        document, inserts it if its identifier is unseen for this platform,
        then closes the detail tab (the caller switches back to the main
        window).
        """
        for h in self.driver.window_handles:
            if h != self.main_window:
                self.driver.switch_to_window(h)
                break

        # Rate-limit page ("access too frequent"): back off briefly.
        # NOTE(review): the page is not reloaded after the sleep, so this
        # scrape still runs against the throttle page — TODO confirm intent.
        if '访问过于频繁' in self.driver.page_source:
            time.sleep(10)

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        # ganji detail pages expose no stable resume id here; stored empty.
        resume_id = ''

        # Each field below is scraped independently; any missing element
        # just yields '' instead of aborting the whole resume.
        try:
            update_time = soup.find(text = re.compile('更新时间：')).parent.find_next_sibling().text
        except:
            update_time = ''

        try:
            title = soup.find(class_ = 'name-line').find('i').text
        except:
            title = ''

        # Placeholder: no company selector implemented for this site yet.
        try:
            company = ''
        except:
            company = ''

        try:
            head_pic = soup.find(class_ = 'resume-avatar').find('img').get('src')
        except:
            head_pic = ''

        try:
            name = soup.find(class_ = 'name-line').find('strong').text
        except:
            name = ''

        try:
            sex = soup.find(class_ = 'name-line').select('span')[0].text
        except:
            sex = ''

        try:
            age = soup.find(class_ = 'name-line').select('span')[1].text
        except:
            age = ''

        try:
            edu = soup.find(class_ = 'resume-basic-info').find(text = re.compile('学历：')).parent.find_next_sibling().text
        except:
            edu = ''

        try:
            exp = soup.find(class_ = 'resume-basic-info').find(text = re.compile('工作年限：')).parent.find_next_sibling().text
        except:
            exp = ''

        # Placeholder: no address selector implemented for this site yet.
        try:
            address = ''
        except:
            address = ''

        personal_info = {
            'head_pic': head_pic,
            'name': name,
            'sex': sex,
            'age': age,
            'edu': edu,
            'exp': exp,
            'address': address,
            'title': title,
            'company': company
        }

        # Placeholder: job nature not available on this site.
        try:
            nature = ''
        except:
            nature = ''

        try:
            salary = soup.find(class_ = 'resume-basic-info').find(text = re.compile('期望薪资：')).parent.find_next_sibling().text
        except:
            salary = ''

        # `title` is deliberately reused here for the wanted-job title;
        # personal_info above already captured the earlier value.
        try:
            title = soup.find(class_ = 'resume-basic-info').find(text = re.compile('求职意向：')).parent.find_next_sibling().text
        except:
            title = ''

        try:
            location = soup.find(class_ = 'resume-basic-info').find(text = re.compile('工作地点：')).parent.find_next_sibling().text
        except:
            location = ''

        # Placeholder: current status not available on this site.
        try:
            current = ''
        except:
            current = ''

        wanted_job = {
            'nature': nature,
            'salary': salary,
            'title': title,
            'location': location,
            'current': current,
            'industry': '',
        }

        try:
            self_introduce = soup.find(class_ = 'self-block').get_text().strip()
        except:
            self_introduce = ''

        work_experience = []

        # Each <b> is an employer name; its sibling list holds time,
        # title and description paragraphs in fixed <li> positions.
        for item in soup.select('.experience-block b'):
            try:
                t = item.find_next_sibling().select('li')[0].find('p').get_text()
            except:
                t = ''

            try:
                describe = item.find_next_sibling().select('li')[2].find('p').get_text()
            except:
                describe = ''

            try:
                title = item.find_next_sibling().select('li')[1].find('p').get_text()
            except:
                title = ''

            try:
                company = item.text
            except:
                company = ''

            work_experience.append({
                'salary': '',
                'time': t,
                'describe': describe,
                'title': title,
                'company': company,
            })

        # Project experience extraction is disabled (selectors below were
        # written for a different page layout); kept for reference.
        proj_experience = []

        # for item in soup.select('#projectExperience .mr_jobe_list'):
        #     try:
        #         t = item.find(class_ = 'mr_content_r').text.strip()
        #     except:
        #         t = ''
        #
        #     try:
        #         title = item.find(class_ = 'projectTitle').text.strip()
        #     except:
        #         title = ''
        #
        #     try:
        #         describe = item.find(class_ = 'mr_content_m').get_text().strip()
        #     except:
        #         describe = ''
        #
        #     proj_experience.append({
        #         'time': t,
        #         'title': title,
        #         'describe': describe,
        #         'responsibility': ''
        #     })

        edu_experience = []

        # Education table columns: [time, school, major].
        for i in soup.select('.education-block table tbody tr'):
            item = {
                'school': i.select('td')[1].text,
                'time': i.select('td')[0].text,
                'major': i.select('td')[2].text
            }

            edu_experience.append(item)

        skills = []

        # Professional-skills section: cells are "name：degree" / "…：time".
        if soup.find('span', text = re.compile('专业技能')):
            for i in soup.find('span', text = re.compile('专业技能')).parent.parent.parent.find_next_sibling().select('tr'):
                item = {
                    'name': i.select('td')[0].text.split('：')[0],
                    'degree': i.select('td')[0].text.split('：')[1],
                    'time': i.select('td')[1].text.split('：')[1]
                }

                skills.append(item)

        certificate = []

        # Certificates/awards section: cells are "label：value" pairs.
        if soup.find('span', text = re.compile('证书奖项')):
            for i in soup.find('span', text = re.compile('证书奖项')).parent.parent.parent.find_next_sibling().select('tr'):
                item = {
                    'name': i.select('td')[0].text.split('：')[1],
                    'time': i.select('td')[1].text.split('：')[1],
                    'org': i.select('td')[2].text.split('：')[1]
                }

                certificate.append(item)

        # Not extracted for this site; stored empty for schema consistency.
        sch_experience = []
        language = []

        # Same dedup key as crawl_resume_list; also shadows common.identifier.
        identifier = personal_info['name'] + '|' + personal_info['edu'] + '|' + personal_info['sex']

        # NOTE(review): `skills` is collected above but never written into
        # the stored document.
        resume = {
            'resume_id': resume_id,
            'update_time': update_time,
            'personal_info': personal_info,
            'wanted_job': wanted_job,
            'self_introduce': self_introduce,
            'work_experience': work_experience,
            'proj_experience': proj_experience,
            'edu_experience': edu_experience,
            'certificate': certificate,
            'sch_experience': sch_experience,
            'platform': 'ganji',
            'language': language,
            'identifier': identifier,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'link': self.driver.current_url
        }

        if not self.qushu_resume_coll.find_one({'identifier': identifier, 'platform': 'ganji'}):
            self.qushu_resume_coll.insert_one(resume)
            pprint.pprint(resume)

        time.sleep(1)

        # Close the detail tab; the caller switches back to the main window.
        self.driver.close()


def start(keywords, net):
    """Multiprocessing-friendly entry point: crawl *keywords* against *net*.

    Args:
        keywords: list of search keywords to crawl.
        net: MongoDB host address passed through to the crawler.
    """
    # BUG FIX: the original called LAGOU(), a class from a sibling crawler
    # module that does not exist here; this module's crawler class is GANJI.
    GANJI(keywords, net)


if __name__ == '__main__':
    from optparse import OptionParser

    parser = OptionParser(usage = "%prog [options]")
    # BUG FIX: optparse only accepts the type names "string", "int", "long",
    # "choice", "float" and "complex"; type="str" raises
    # OptionError: invalid option type: 'str' as soon as add_option runs.
    parser.add_option("-e", "--net", action = "store", type = "string", dest = "net", help = "the network address", default = '47.96.88.18')
    options, args = parser.parse_args()

    # Retail-oriented keywords (store manager, clerk, cashier, etc.).
    GANJI(['店长', '店员', '收银', '收银主管', '陈列师', '督导', '促销员'], options.net)