# -*- coding: utf-8 -*-
import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')

from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from selenium import webdriver
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse
import numpy as np
import requests
import platform
import datetime
import pymongo
import smtplib
import hashlib
import random
import base64
import pprint
import codecs
import math
import json
import time
import ssl
import os
import re


def fill_code():
    """Pause the scraper for a fixed interval.

    Presumably a window for a human to fill in a verification code by
    hand — TODO confirm with the caller.
    """
    pause_seconds = 40
    time.sleep(pause_seconds)


# Proxy-IP provider API endpoint; leave empty to disable proxy usage.
proxy_ip_api = ''


def chrome_driver():
    """Create a Selenium WebDriver: Firefox on Linux, Chrome elsewhere.

    When ``proxy_ip_api`` is configured, polls the API until it returns a
    proxy list and wires the first proxy into the driver's capabilities.

    Returns:
        A ready-to-use ``selenium.webdriver`` instance.
    """
    if proxy_ip_api:
        while True:
            r = requests.get(proxy_ip_api)
            # Parse the response once instead of re-calling r.json() for
            # every field access.
            msg = r.json()['msg']

            if isinstance(msg, str):
                # A string 'msg' means "no proxy available yet"; back off
                # with a little jitter and retry.
                time.sleep(5 + random.random())
            else:
                break

        proxy = msg[0]['ip'] + ':' + msg[0]['port']
    else:
        proxy = ''

    is_linux = 'linux' in platform.platform().lower()

    if is_linux:
        options = webdriver.FirefoxOptions()
    else:
        options = webdriver.ChromeOptions()

    # options.add_argument('--headless')
    # options.add_argument('--disable-gpu')
    # options.add_argument('--disable-images')

    # NOTE(review): the desired_capabilities keyword is deprecated in
    # Selenium 4; kept because the rest of this script targets the
    # Selenium 3 API (find_element_by_css_selector etc.).
    desired_capabilities = options.to_capabilities()

    if proxy:
        desired_capabilities['proxy'] = {
            "httpProxy": proxy,
            "ftpProxy": proxy,
            "sslProxy": proxy,
            "noProxy": None,
            "proxyType": "MANUAL",
            "class": "org.openqa.selenium.Proxy",
            "autodetect": False
        }

    if is_linux:
        binary = FirefoxBinary('/usr/local/firefox/firefox')
        driver = webdriver.Firefox(desired_capabilities = desired_capabilities, firefox_binary = binary)
    else:
        # Fall through a couple of known checkout locations for the driver.
        if os.path.exists('/Users/xuchaosheng/Workspace/qs-scrapy/chromedriver'):
            executable_path = '/Users/xuchaosheng/Workspace/qs-scrapy/chromedriver'
        else:
            executable_path = '/Users/xuchaosheng/Workspace/qushu-recruit-data/chromedriver'

        driver = webdriver.Chrome(desired_capabilities = desired_capabilities, executable_path = executable_path)

    return driver


# Module-level Mongo handles; re-bound in the __main__ block when a
# non-default host is supplied via --network.
client = MongoClient('localhost', 27017, connect = False)

# Master position database (shared collection for scraped postings)
qushu_qiancheng_db = client['qushu_qiancheng_db']
qushu_qiancheng_position = qushu_qiancheng_db['qushu_qiancheng_position']


class BOSS():
    """Scrape job postings from zhipin.com (BOSS直聘) into MongoDB.

    WARNING: constructing an instance immediately runs the whole scrape —
    it creates a Selenium driver and walks every keyword in *menus*.
    """

    def __init__(self, menus):
        self.driver = chrome_driver()
        print(menus)
        print('*' * 100)
        self.start(menus)

    def start(self, menus):
        """Scrape the nationwide ('c100010000') listing for each keyword."""
        if len(menus):
            for query in menus:
                url = 'https://www.zhipin.com/c100010000/?query=' + query + '&page=1'
                self.scrapy_position_list(url)
                time.sleep(5)

    def url2base64(self, picUrl):
        """Download *picUrl* and return its body base64-encoded (bytes)."""
        with request.urlopen(picUrl) as web:
            return base64.b64encode(web.read())

    def CalcSign(self, s):
        """Return the hex MD5 digest of *s* (fateadm request signature)."""
        hl = hashlib.md5()
        hl.update(s.encode(encoding = 'utf-8'))

        return hl.hexdigest()

    def get_code(self, picUrl):
        """Submit the captcha image at *picUrl* to the fateadm solving API.

        Returns the recognized text on success, ``False`` on any API error.
        """
        # SECURITY NOTE(review): user id and signing key are hard-coded;
        # they belong in configuration, not source control.
        tm = str(int(time.time()))
        img_data = self.url2base64(picUrl)
        rsp = requests.post('http://pred.fateadm.com/api/capreg', data = {
            'user_id': '103595',
            'timestamp': tm,
            'sign': self.CalcSign('103595' + tm + self.CalcSign(tm + 'qn4iUtbRoSU95do6zEglMUmOLIjkNIiQ')),
            'predict_type': '304000001',
            'img_data': img_data
        }).json()

        if rsp['RetCode'] == '0':
            return json.loads(rsp['RspData'])['result']
        else:
            return False

    def _solve_captcha_if_present(self):
        """Solve the anti-bot captcha page when it is showing.

        Returns ``True`` when no captcha was present or it was solved,
        ``False`` when solving failed (caller should abort the page).
        """
        if self.driver.page_source.find('为了您的账号安全') == -1:
            return True

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        code_url = 'https://www.zhipin.com' + soup.find(class_ = 'code').get('src')
        code = self.get_code(code_url)

        if not code:
            return False

        self.driver.find_element_by_css_selector('#captcha').send_keys(code)
        self.driver.find_element_by_css_selector('.btn').click()

        time.sleep(3)
        return True

    def scrapy_position_list(self, url = ''):
        """Scrape one search-result page, save each posting, follow 'next'.

        Recurses into the next page until the pagination link is disabled
        ('javascript:;').
        """
        if url:
            self.driver.get(url)

            if not self._solve_captcha_if_present():
                return

        # Explicit parser: the previous bare BeautifulSoup(...) call emitted
        # a GuessedAtParserWarning and could pick different parsers per host.
        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        for item in soup.select('.job-list li'):
            # One lookup for the anchor instead of three.
            anchor = item.find(class_ = 'info-primary').find('a')

            link = anchor.get('href')
            lid = anchor.get('data-lid')
            ka = anchor.get('ka')

            self.save_position('https://www.zhipin.com' + link + '?ka=' + ka + '&lid=' + lid)
            time.sleep(1)

        next_link = soup.find(class_ = 'next')

        if next_link and next_link.get('href') != 'javascript:;':
            time.sleep(2)
            self.scrapy_position_list('https://www.zhipin.com' + next_link.get('href'))

    def save_position(self, url):
        """Scrape one job-detail page and insert it if not already stored.

        De-duplication key is (name, company, location).
        """
        self.driver.get(url)

        if not self._solve_captcha_if_present():
            return

        time.sleep(1)

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        # Cache the repeatedly-queried nodes.
        info_primary = soup.find(class_ = 'info-primary')
        primary_parts = info_primary.find('p').contents

        name = soup.find('h1').text
        salary = info_primary.find(class_ = 'salary').text.strip()
        location = primary_parts[0]
        exp = primary_parts[2]
        edu = primary_parts[-1]
        description = soup.find(class_ = 'job-sec').find(class_ = 'text').get_text().strip()
        company = soup.find(class_ = 'company-info').find(class_ = 'info').get_text().strip().split()[0]
        place = soup.find(class_ = 'location-address').text
        date = ''
        welfare = []
        industry = soup.find(attrs = {'ka': 'job-detail-brandindustry'}).text
        logoUrl = ''

        # '不限' ("no requirement") / '应届生' (fresh graduate) carry no
        # usable experience value.
        if exp.find('不限') != -1 or exp == '应届生':
            exp = []

        if salary:
            # e.g. '15k-25k' -> [15000, 25000]; raw string avoids the
            # invalid-escape DeprecationWarning of the old '\d+' literal.
            salary = (np.array(re.findall(r'\d+', salary), dtype = int) * 1000).tolist()
        else:
            salary = []

        item = {
            'url': url,
            'type': '',
            'name': name,
            'salary': salary,
            'location': location,
            'count': '',
            'exp': exp,
            'edu': edu,
            'date': date.split(' ')[0],
            'lang': '',
            'welfare': welfare,
            'description': description,
            'place': place,
            'company': company,
            'compId': '',
            'logoUrl': logoUrl,
            'major': '',
            'funType': '',
            'industry': industry,
            'platform': 'boss',
            'compsize': '',
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        if name and not qushu_qiancheng_position.find_one({'name': name, 'company': company, 'location': location}):
            qushu_qiancheng_position.insert_one(item)
            pprint.pprint(item)


def start(menus):
    """Worker-process entry point: scrape every keyword in *menus*.

    Instantiating ``BOSS`` kicks off the full scrape as a side effect of
    its constructor; there is nothing to return.
    """
    BOSS(menus)


# Built-in search keywords, merged (and de-duplicated via set()) with
# keywords.txt in the __main__ block. BUG FIX: an empty-string entry has
# been removed — it produced a pointless scrape of '?query=&page=1'.
# Remaining duplicates (e.g. 'erlang', '机器视觉') are harmless because
# the list is de-duplicated downstream.
tech_keywords = [
    'html5', '架构师', '软件测试', '硬件交互设计师', '深度学习', '数据分析', '运维总监', 'CTO', '硬件测试', 'Android', '机器学习', '网络安全',
    '机器视觉', 'CMO', '自然语言处理', '数据挖掘', '网络工程师', '数据库', '算法工程师', 'Python', 'Ruby', '游戏特效', '前端', '游戏开发', 'golang',
    'erlang', '网页设计', '全栈工程师', '搜索算法', 'Hadoop', '运维工程师', 'BI工程师', 'iOS', 'C++', 'c#', 'Java', 'DB2', '.net', 'delphi',
    '图像识别', 'PHP', '语音识别', 'perl', 'ruby', 'node.js', 'erlang', '爬虫', '数据采集', 'u3d', '测试工程师', '功能测试', '性能测试', '游戏测试',
    'Flash', '技术合伙人', '系统工程师', '桌面开发', 'WEB安全', '游戏策划', 'Oracle', 'COCOS2D', 'linux', 'unix', 'etl', '数据仓库', 'unity3d',
    '目标检测', '推荐系统', '安卓开发', '云计算', '大数据', '移动端测试', '推荐算法', '驱动开发', '自动驾驶', '图像算法', '音频算法',
    '运维工程师', '网络工程师', 'IT技术支持', 'DBA', '算法研究员', 'shell', '区块链', '手机测试', '运维开发工程师',
    '嵌入式', 'windows开发', '人工智能', 'spark', '机器视觉', '智能问答', '人脸识别', 'ai研究员', '知识图谱', '技术总监', '技术主管', '技术副总裁', '数据平台'
]

if __name__ == '__main__':
    from optparse import OptionParser

    parser = OptionParser(usage = "%prog [options]")
    parser.add_option("-p", "--process", action = "store", type = "int", dest = "count", help = "the count of process", default = 3)
    parser.add_option("-e", "--network", action = "store", type = "str", dest = "address", help = "the address of mongodb", default = 'localhost')

    options, args = parser.parse_args()

    # Re-bind the module-level Mongo handles to the requested host.
    client = MongoClient(options.address, 27017, connect = False)

    qushu_qiancheng_db = client['qushu_qiancheng_db']
    qushu_qiancheng_position = qushu_qiancheng_db['qushu_qiancheng_position']

    # keywords.txt holds Chinese keywords — read it as UTF-8 explicitly
    # instead of relying on the platform default encoding.
    with open('keywords.txt', 'r', encoding = 'utf-8') as file:
        keywords = list(set([word.strip() for word in file.readlines()] + tech_keywords))
        random.shuffle(keywords)

    proc_list = []

    # BUG FIX: the old contiguous slicing (seg * i : seg * (i + 1)) with
    # seg = len(keywords) // count silently dropped the last
    # len(keywords) % count keywords. Stride slicing assigns every keyword
    # to exactly one worker (the list is shuffled, so the split stays fair).
    for i in range(options.count):
        p = Process(target = start, args = (keywords[i::options.count],))
        proc_list.append(p)

    for p in proc_list:
        p.start()

    for p in proc_list:
        p.join()
