# -*- coding: utf-8 -*-

import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')

from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.chrome.options import Options
# from selenium.webdriver import FirefoxOptions
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

from selenium import webdriver
import numpy as np
import requests
import platform
import datetime
import pymongo
import smtplib
import random
import pprint
import codecs
import math
import json
import time
import ssl
import os
import re

# connect=False defers the actual connection until first use — presumably
# so each forked worker process opens its own connection (pymongo is not
# fork-safe with a pre-connected client); TODO confirm against pymongo docs.
client = MongoClient('127.0.0.1', 27017, connect = False)

# Master database/collection of all crawled job positions
knx_all_position_db = client['knx_all_position_db']
knx_all_position_coll = knx_all_position_db['knx_all_position_coll']

# Master database/collection of all crawled enterprises
knx_all_enterprise_db = client['knx_all_enterprise_db']
knx_all_enterprise_coll = knx_all_enterprise_db['knx_all_enterprise_coll']


def comp_id():
    """Return a random 30-character alphanumeric company id.

    Characters are drawn independently (with replacement) from the
    ASCII letters and digits, matching the original 30 single-char
    random.sample() draws.
    """
    seq = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    # random.choices samples with replacement; also avoids the original's
    # shadowing of the builtin `id`.
    return ''.join(random.choices(seq, k = 30))


def new_driver():
    """Create and return a headless Selenium WebDriver.

    On Linux: Firefox, using the binary at /usr/local/firefox/firefox.
    Elsewhere (Windows/macOS dev boxes): Chrome, with the chromedriver
    binary expected under the sibling 'libs' directory (the cwd path has
    'dajie' replaced by 'libs/chromedriver').

    The legacy firefox_options/chrome_options keyword names are kept for
    compatibility with the selenium version this project targets.
    """
    if 'linux' in platform.platform().lower():
        binary = FirefoxBinary('/usr/local/firefox/firefox')
        opts = webdriver.FirefoxOptions()
        opts.add_argument('--headless')

        # Note: geckodriver is resolved from PATH here; the original also
        # computed a chromedriver path in this branch but never used it.
        driver = webdriver.Firefox(firefox_options = opts, firefox_binary = binary)
    else:
        chrome_options = Options()
        chrome_options.add_argument('--headless')

        executable_path = os.getcwd().replace('dajie', 'libs/chromedriver')

        driver = webdriver.Chrome(executable_path = executable_path, chrome_options = chrome_options)

    return driver


class DAJIE():
    """Crawler for dajie.com.

    Walks company listing pages [begin, end); for each company whose
    "level" exceeds 60 and that has at least one open position, crawls
    the company's job list and every job detail page. Position records
    go into knx_all_position_coll and the owning enterprise into
    knx_all_enterprise_coll; records already present are skipped.
    """

    def __init__(self, begin, end):
        # One driver per crawl depth (listing page / job list / job
        # detail) so navigation at one level does not clobber another.
        self.driver = new_driver()
        self.driver2 = new_driver()
        self.driver3 = new_driver()

        for page in range(begin, end):
            url = 'https://www.dajie.com/corp/index-pa' + str(page) + '-ci-po-kw/'
            self.driver.get(url)
            soup = BeautifulSoup(self.driver.page_source)

            for item in soup.select('.listBox li'):
                # The company "level" is rendered as a number inside an
                # inline style attribute; strip everything non-digit.
                level = int(re.sub(r"\D", "", item.find(class_ = 'level-inner').get('style')))

                # Skip low-level companies.
                if level <= 60:
                    continue

                # Number of open positions ("在招职位") for this company.
                job_count = int(item.find(class_ = 'jz-count').find(text = re.compile('在招职位')).find_next_sibling().text)

                if not job_count:
                    continue

                corp_id = item.find(class_ = 'attention').get('data-corp-id')

                self.crawl_position_list(corp_id, job_count)

                # Throttle between companies to avoid hammering the site.
                time.sleep(5)

    def crawl_position_list(self, corp_id, job_count):
        """Crawl every page of one company's job list (10 jobs per page)
        and hand each job's link, posting date and raw salary text to
        crawl_position()."""
        for page in range(1, math.ceil(job_count / 10) + 1):
            url = 'https://www.dajie.com/corp/' + corp_id + '/joinus?page=' + str(page)
            self.driver2.get(url)
            time.sleep(1)
            soup = BeautifulSoup(self.driver2.page_source)

            for item in soup.select('.job-suggest-list li'):
                try:
                    link = 'https:' + item.find('h3').find('a').get('href').strip()
                    date = item.find(class_ = 'update-time').text
                except Exception:
                    # Without a link and a date the entry is unusable.
                    continue

                try:
                    salary = item.find(class_ = 'money').text
                except Exception:
                    salary = ''

                self.crawl_position(link, date, salary)

    def crawl_position(self, url, date, salary_):
        """Crawl one job detail page and store the position record.

        salary_ is the raw salary text from the job list (e.g.
        "5000-8000" or "200元/天"); the digits are parsed into a list of
        ints, with per-day wages scaled by 30 to a monthly figure.
        Every page field is scraped best-effort: a missing node simply
        leaves the field at its empty default.
        """
        self.driver3.get(url)
        time.sleep(1)
        soup = BeautifulSoup(self.driver3.page_source)

        try:
            name = soup.find(class_ = 'job-name').text.strip()
        except Exception:
            name = ''

        try:
            salary = np.array(re.findall(r'\d+', salary_), dtype = int)

            # Daily wages ("元/天") are normalised to a monthly figure.
            if salary_.find('元/天') != -1:
                salary = salary * 30

            salary = salary.tolist()
        except Exception:
            salary = []

        try:
            location = soup.find(class_ = 'job-msg-center').find(class_ = 'ads').get_text().strip()
        except Exception:
            location = ''

        try:
            count = int(re.sub(r"\D", "", soup.find(class_ = 'job-msg-center').find(class_ = 'recruiting').get_text()))
        except Exception:
            count = -1

        try:
            exp = soup.find(class_ = 'job-msg-center').find(class_ = 'exp').get_text().strip()

            # "不限工作经验" means no experience requirement.
            if exp == '不限工作经验' or exp == '':
                exp = []
            else:
                exp = [int(re.sub(r"\D", "", exp))]
        except Exception:
            exp = []

        try:
            # Strip the "及以上" ("or above") suffix from the education level.
            edu = soup.find(class_ = 'job-msg-center').find(class_ = 'edu').get_text().replace('及以上', '')
        except Exception:
            edu = ''

        try:
            welfare = soup.find(class_ = 'job-msg-bottom').get_text().strip().split()
        except Exception:
            welfare = []

        try:
            description = soup.find(class_ = 'position-data ').get_text().strip()
        except Exception:
            description = ''

        try:
            place = soup.find(class_ = 'ads-msg').find('span').text.strip()
        except Exception:
            place = ''

        try:
            # Company nature/type row is labelled '性质'.
            comp_type = soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('性质')).find_next_sibling().text
        except Exception:
            comp_type = ''

        try:
            industry = soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('行业')).find_next_sibling().text
        except Exception:
            industry = ''

        try:
            company = soup.find(class_ = 'i-corp-base-info').find(class_ = 'title').get_text().strip()
        except Exception:
            company = ''

        try:
            # Company size ('规模') reduced to its numeric part.
            compsize = int(re.sub(r"\D", "", soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('规模')).find_next_sibling().text))
        except Exception:
            compsize = ''

        try:
            logoUrl = soup.find(class_ = 'i-corp-base-info').find('a').find('img').get('src')
        except Exception:
            logoUrl = ''

        # Low education requirements are not stored.
        if edu in ['大专', '中专', '高中', '初中', '中技']:
            return

        item = {
            'url': url,
            'type': comp_type,
            'name': name,
            'salary': salary,
            'location': location,
            'count': count,
            'exp': exp,
            'edu': edu,
            'date': date,
            'lang': '',
            'welfare': welfare,
            'description': description,
            'place': place,
            'company': company,
            'compId': '',
            'ranking_score': -1,
            'logoUrl': logoUrl,
            'major': '',
            'funType': '',
            'industry': industry,
            'platform': 3,
            'compsize': compsize,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # (name, company, location) is treated as the dedup key.
        if not knx_all_position_coll.find_one({'name': name, 'company': company, 'location': location}):
            try:
                knx_all_position_coll.insert_one(item)
                print(item['company'], item['name'], 'inserted [message from dajie]')
            except Exception:
                print('insert error')

            self.crawl_enterprise(soup)
        else:
            print(item['company'], item['name'], 'existed [message from dajie]')

    def crawl_enterprise(self, soup):
        """Scrape the enterprise block from a job detail page soup and
        insert the enterprise record if not already present (keyed on
        company name)."""
        try:
            name = soup.find(class_ = 'i-corp-base-info').find(class_ = 'title').find('a').text.strip()
        except Exception:
            name = ''

        try:
            size = soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('规模')).find_next_sibling().text
        except Exception:
            size = ''

        try:
            industry = soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('行业')).find_next_sibling().text
        except Exception:
            industry = ''

        try:
            # BUGFIX: the original re-matched '行业' (industry) here — a
            # copy-paste slip that duplicated the industry field. The
            # company type row is labelled '性质', exactly as matched in
            # crawl_position() on this same soup.
            comp_type = soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('性质')).find_next_sibling().text
        except Exception:
            comp_type = ''

        try:
            description = soup.find(class_ = 'i-corp-desc').get_text()
        except Exception:
            description = ''

        try:
            logoUrl = soup.find(class_ = 'i-corp-base-info').find('img').get('src')
        except Exception:
            logoUrl = ''

        # A record without a company name is useless.
        if not name:
            return

        item = {
            'name': name,
            'type': comp_type,
            'site': '',
            'url': '',
            'size': size,
            'level': '',
            'logoUrl': logoUrl,
            'industry': industry,
            'location': '',
            'platform': 3,
            'compId': comp_id(),
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        if not knx_all_enterprise_coll.find_one({'name': item['name']}):
            knx_all_enterprise_coll.insert_one(item)
            print(item['name'], 'inserted [message from dajie]')

def start(begin, end):
    """Worker-process entry point: crawl listing pages [begin, end)."""
    DAJIE(begin, end)


if __name__ == '__main__':
    # Split the ~300 listing pages across 5 worker processes; ranges are
    # half-open, so together they cover pages 1..300.
    page_ranges = [(1, 60), (60, 120), (120, 180), (180, 240), (240, 301)]
    workers = [Process(target = start, args = span) for span in page_ranges]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
