# -*- coding: utf-8 -*-
import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')

from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# from selenium.webdriver import FirefoxOptions
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

from selenium import webdriver
import numpy as np
import requests
import platform
import datetime
import pymongo
import smtplib
import random
import pprint
import codecs
import math
import json
import time
import ssl
import os
import re


def comp_id():
    """Generate a random 30-character alphanumeric company id.

    Returns:
        str: 30 characters drawn uniformly, with replacement, from
        [A-Za-z0-9].
    """
    seq = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'

    # random.choices picks k characters with replacement in one call —
    # same distribution as the original per-character sample loop, and
    # it avoids shadowing the builtin `id`.
    # NOTE: `random` is not cryptographically secure; use `secrets` if
    # these ids ever become security-sensitive.
    return ''.join(random.choices(seq, k = 30))


def fill_code():
    """Block for 40 seconds.

    NOTE(review): the name suggests this is a window for a human to
    fill in a verification code/CAPTCHA in the browser — confirm; no
    caller is visible in this file.
    """
    time.sleep(40)


# Proxy-IP API endpoint (original comment: 代理IP地址, "proxy IP address").
# Left empty so chrome_driver() skips proxy setup entirely; the
# commented-out code inside chrome_driver() shows how it was once loaded
# from a file.
proxy_ip_api = ''

def chrome_driver():
    """Create a headless WebDriver: Firefox on Linux, Chrome elsewhere.

    If the module-level ``proxy_ip_api`` is non-empty, polls it until it
    returns a list of proxies and wires the first one into the driver's
    desired capabilities.

    Returns:
        A selenium Firefox or Chrome driver with a 10-second page-load
        timeout.
    """
    # print(os.path.join(os.getcwd(),'boss','proxy-ip'))
    # with open(os.path.join(os.getcwd(),'boss','proxy-ip'), 'r') as f:
    #     proxy_ip_api = f.read().strip()

    if proxy_ip_api:
        # Poll until 'msg' is a proxy list; a str 'msg' means
        # "not ready yet". Decode the response body once per request —
        # the original re-parsed r.json() on every field access
        # (requests does not cache .json()).
        while True:
            payload = requests.get(proxy_ip_api).json()

            if isinstance(payload['msg'], str):
                time.sleep(5 + random.random())
            else:
                break

        first = payload['msg'][0]
        proxy = first['ip'] + ':' + first['port']
    else:
        proxy = ''

    # Hoisted: the platform check was evaluated twice before.
    on_linux = 'linux' in platform.platform().lower()

    if on_linux:
        options = webdriver.FirefoxOptions()
    else:
        options = webdriver.ChromeOptions()

    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-images')

    desired_capabilities = options.to_capabilities()

    if proxy:
        # Manual proxy block in the legacy Selenium capabilities format.
        desired_capabilities['proxy'] = {
            "httpProxy": proxy,
            "ftpProxy": proxy,
            "sslProxy": proxy,
            "noProxy": None,
            "proxyType": "MANUAL",
            "class": "org.openqa.selenium.Proxy",
            "autodetect": False
        }

    if on_linux:
        binary = FirefoxBinary('/usr/local/firefox/firefox')
        driver = webdriver.Firefox(desired_capabilities = desired_capabilities, firefox_binary = binary)
    else:
        # NOTE(review): this path rewrite only works when the process is
        # started from a directory whose path contains 'boss' — confirm
        # against the deployment layout.
        executable_path = os.getcwd().replace('boss', 'libs/chromedriver')
        driver = webdriver.Chrome(desired_capabilities = desired_capabilities, executable_path = executable_path)

    driver.set_page_load_timeout(10)

    return driver


# connect=False defers the actual connection until first use, which per
# the pymongo docs is required when the client is created before
# multiprocessing forks (as it is here — see the Process pool below).
client = MongoClient('127.0.0.1', 27017, connect = False)

# Master position database/collection (original comment: 职位总库).
knx_all_position_db = client['knx_all_position_db_demo']
knx_all_position_coll = knx_all_position_db['knx_all_position_coll_demo']

# Master enterprise database/collection (original comment: 企业总库).
knx_all_enterprise_db = client['knx_all_enterprise_db_demo']
knx_all_enterprise_coll = knx_all_enterprise_db['knx_all_enterprise_coll_demo']


class BOSS():
    """Crawler for zhipin.com (BOSS直聘) job listings.

    Walks search-result pages with a headless WebDriver, stores new
    positions in ``knx_all_position_coll`` and their companies in
    ``knx_all_enterprise_coll``, and rebuilds the driver whenever a page
    load fails or the site serves its anti-bot interstitial.
    """

    def __init__(self, codes, menus):
        # Constructing the object starts the crawl immediately and
        # blocks until it finishes.
        self.driver = chrome_driver()
        self.start(codes, menus)

    def start(self, codes, menus):
        """Crawl every keyword in ``menus`` across a grid of URL filters.

        ``codes`` is currently unused — the position-code crawl below is
        commented out.
        """
        if len(menus):
            for query in menus:
                # e_/d_/s_ look like zhipin filter codes (presumably
                # experience / degree / salary bands — TODO confirm).
                for e in ['102', '103', '104']:
                    for d in ['203', '204', '205']:
                        for s in ['306', '305', '304']:
                            url = 'https://www.zhipin.com/c100010000/e_' + e + '-d_' + d + '-s_' + s + '-h_100010000/?query=' + query
                            try:
                                self.scrapy_position_list(url)
                            except:
                                # Best effort: skip this filter
                                # combination on any failure.
                                continue

                                # if len(codes):
                                #     for c in codes:
                                #         for e in ['102', '103', '104']:
                                #             for d in ['203', '204', '205']:
                                #                 for s in ['306', '305', '304']:
                                #                     url = 'https://www.zhipin.com/c100010000-p' + str(c) + '/e_' + e + '-d_' + d + '-s_' + s
                                #                     self.scrapy_position_list(url)

    def scrapy_position_list(self, url = ''):
        """Scrape one result page, save its jobs, then follow the next-page link.

        On a page-load failure, or when the anti-bot page appears (page
        text contains '为了您的账号安全', "for your account's safety"),
        the driver is replaced and the same URL is retried recursively.
        """
        if url:
            try:
                self.driver.get(url)
            except:
                # Load failed (e.g. timeout) — replace the driver and
                # retry this URL.
                self.driver.close()
                self.driver.quit()

                self.driver = chrome_driver()
                return self.scrapy_position_list(url)

            time.sleep(1)

            if self.driver.page_source.find('为了您的账号安全') != -1:
                # Anti-bot interstitial — new driver, retry.
                self.driver.close()
                self.driver.quit()

                self.driver = chrome_driver()
                return self.scrapy_position_list(url)

        page_source = self.driver.page_source

        soup = BeautifulSoup(page_source)

        for item in soup.select('.job-list li'):
            # name = item.find(class_ = 'job-title').text
            # salary = item.find(class_ = 'red').text

            info_primary = item.find(class_ = 'info-primary').find('p').get_text()
            info_company = item.find(class_ = 'info-company').find('p').get_text()

            # Skip listings whose education requirement is junior
            # college ('大专'), high school ('高中') or technical
            # school ('中专').
            if info_primary.find('大专') != -1 or info_primary.find('高中') != -1 or info_primary.find('中专') != -1:
                continue

            # if info_primary.find('年') != -1:
            #     continue

            # if info_company.find('0-20人') != -1 or info_company.find('20-99人') != -1:
            #     continue

            # Rebuild the detail-page URL from the anchor's tracking
            # attributes (ka / data-lid).
            link = item.find(class_ = 'info-primary').find('a').get('href')
            lid = item.find(class_ = 'info-primary').find('a').get('data-lid')
            ka = item.find(class_ = 'info-primary').find('a').get('ka')

            url = 'https://www.zhipin.com' + link + '?ka=' + ka + '&lid=' + lid

            try:
                self.save_position(url)
            except:
                # One bad listing must not abort the whole page.
                continue

            time.sleep(1)

        # Recurse into the next page while a real pagination link
        # exists ('javascript:;' marks the last page).
        if soup.find(class_ = 'next') and soup.find(class_ = 'next').get('href') != 'javascript:;':
            time.sleep(2)
            self.scrapy_position_list('https://www.zhipin.com' + soup.find(class_ = 'next').get('href'))

    def save_position(self, url):
        """Fetch one job-detail page and insert it into the position collection.

        Deduplicates on (name, company, location); on first insert the
        company page is scraped too via save_enterprise().
        """
        try:
            self.driver.get(url)
        except:
            # Load failed — replace the driver and retry this URL.
            self.driver.close()
            self.driver.quit()

            self.driver = chrome_driver()
            return self.save_position(url)

        time.sleep(random.random())

        if self.driver.page_source.find('为了您的账号安全') != -1:
            # Anti-bot interstitial — new driver, retry.
            self.driver.close()
            self.driver.quit()

            self.driver = chrome_driver()
            return self.save_position(url)

        page_source = self.driver.page_source
        soup = BeautifulSoup(page_source)

        # All fields are scraped in one block: any layout change makes
        # the listing unusable, so bail out on the first missing field.
        # The '城市/经验/学历' (city/experience/education) values are
        # sliced out of one '：'-separated text run.
        try:
            name = soup.find('h1').text
            salary = soup.find(class_ = 'info-primary').find(class_ = 'badge').text
            location = soup.find(class_ = 'info-primary').find(text = re.compile('城市：')).parent.text.split('：')[1].replace('经验', '')
            exp = soup.find(class_ = 'info-primary').find(text = re.compile('经验：')).parent.text.split('：')[2].replace('学历', '')
            edu = soup.find(class_ = 'info-primary').find(text = re.compile('学历：')).parent.text.split('：')[3]
            description = soup.find(class_ = 'job-sec').find(class_ = 'text').get_text().strip()
            company = soup.find(class_ = 'info-company').find('h3').get_text()
            place = soup.find(class_ = 'location-address').text
            date = soup.find(class_ = 'job-author').get_text().replace('发布于', '')
            welfare = soup.find(class_ = 'job-tags').get_text().split()
            industry = soup.find(attrs = {'ka': 'job-detail-brandindustry'}).text
            logoUrl = soup.find(class_ = 'company-logo').find('img').get('src')
        except:
            return

        # '不限' ("no requirement") and '应届生' ("fresh graduate")
        # both normalize to an empty experience list.
        if exp.find('不限') != -1 or exp == '应届生':
            exp = []

        if salary:
            # Numeric parts of the salary text scaled by 1000,
            # e.g. '15k-25k' -> [15000, 25000].
            salary = (np.array(re.findall('\d+', salary), dtype = int) * 1000).tolist()
        else:
            salary = []

        item = {
            'url': url,
            'type': '',
            'name': name,
            'salary': salary,
            'location': location,
            'count': '',
            'exp': exp,
            'edu': edu,
            'date': date.split(' ')[0],
            'lang': '',
            'welfare': welfare,
            'description': description,
            'place': place,
            'company': company,
            'compId': '',
            'ranking_score': -1,
            'logoUrl': logoUrl,
            'major': '',
            'funType': '',
            'industry': industry,
            'platform': 1,
            'compsize': '',
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # Dedupe on (name, company, location); only first-time inserts
        # trigger a company-page scrape.
        if not knx_all_position_coll.find_one({'name': name, 'company': company, 'location': location}):
            knx_all_position_coll.insert_one(item)
            print(item['company'], item['name'], 'inserted [message from boss]')
            time.sleep(1)
            self.save_enterprise(company, 'https://www.zhipin.com' + soup.find(class_ = 'info-company').find('h3').find('a').get('href'))
        else:
            print(item['company'], item['name'], 'existed [message from boss]')

    def save_enterprise(self, name, url):
        """Fetch a company page and insert it into the enterprise collection.

        Skips companies already stored under ``name``; each field is
        parsed best-effort and falls back to '' when missing.
        """
        if knx_all_enterprise_coll.find_one({'name': name}):
            return

        # Normalize to the canonical company-intro URL ('gongsir' links
        # point at the job-list variant of the company page).
        url = url.split('?')[0].replace('gongsir', 'gongsi') + '?ka=company-intro'

        try:
            self.driver.get(url)
        except:
            # Load failed — replace the driver and retry.
            self.driver.close()
            self.driver.quit()

            self.driver = chrome_driver()
            return self.save_enterprise(name, url)

        time.sleep(1)

        if self.driver.page_source.find('为了您的账号安全') != -1:
            # Anti-bot interstitial — new driver, retry.
            self.driver.close()
            self.driver.quit()

            self.driver = chrome_driver()
            return self.save_enterprise(name, url)

        page_source = self.driver.page_source
        soup = BeautifulSoup(page_source)

        try:
            name = soup.find('h1').get_text()
        except:
            name = ''

        try:
            logoUrl = soup.find(class_ = 'company-logo').find('img').get('src')
        except:
            logoUrl = ''

        try:
            description = soup.find(class_ = 'text fold-text').get_text()
        except:
            description = ''

        try:
            # Head-count range like '100-499' -> [100, 499].
            size = (np.array(re.findall('\d+\-\d+', soup.find(class_ = 'info-primary').get_text())[0].split('-'), dtype = int)).tolist()
        except:
            size = ''

        try:
            # Whatever follows the 'N-M人' head-count token is taken as
            # the industry label.
            industry = re.split('\d+\-\d+人', soup.find(class_ = 'info-primary').get_text())[1].strip()
        except:
            industry = ''

        # A page without a parseable company name is useless.
        if not name:
            return

        item = {
            'name': name,
            'type': '',
            'site': '',
            'url': url,
            'size': size,
            'logoUrl': logoUrl,
            'industry': industry,
            'location': '',
            'platform': 1,
            'compId': comp_id(),
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        if not knx_all_enterprise_coll.find_one({'name': name}):
            knx_all_enterprise_coll.insert_one(item)
            print(name, '[message from boss]')

def start(codes, menus):
    """Process entry point: build a BOSS crawler and run it to completion."""
    BOSS(codes, menus)


if __name__ == '__main__':
    # Fixed list of search keywords aimed at internship / campus-
    # recruiting postings.
    menus = [
        "实习",
        "实习生",
        "见习",
        "校招",
        "秋招",
        "夏招",
        "冬招",
        "春招",
        "毕业",
        "毕业生",
        "应届",
        "应届生",
        "2018届",
        "2019届",
        "2019年",
        "2018年",
        "管培生",
        "培训生",
        "19年",
        "18年",
        "18届",
        "19届"
    ]

    worker_count = 5
    chunk = math.ceil(len(menus) / worker_count)

    # One crawler process per contiguous chunk of keywords.
    workers = [
        Process(target = start, args = ([], menus[i * chunk:(i + 1) * chunk]))
        for i in range(worker_count)
    ]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
