# -*- coding: utf-8 -*-
import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')
sys.path.append('C:\\Users\\OldDog5588\\AppData\\Local\\Programs\\Python\\Python37\\Lib\\site-packages')

from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.chrome.options import Options
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from selenium import webdriver
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

import numpy as np
import requests
import datetime
import platform
import pymongo
import random
import codecs
import pprint
import math
import json
import time
import ssl
import os
import re

# Force UTF-8 on stdout/stderr so the Chinese strings scraped below print
# correctly regardless of the console's default encoding.
if sys.stdout.encoding != 'UTF-8':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
if sys.stderr.encoding != 'UTF-8':
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')

# Platform-dependent browser setup: headless Firefox on the Linux server,
# headless Chrome elsewhere (the Windows dev box, per the sys.path entries
# above). Three separate drivers are created so that the list pages
# (driver), job-detail pages (driver2) and company pages (driver3) each
# keep their own browsing context.
# NOTE(review): the ``firefox_options=``/``chrome_options=`` keyword
# arguments are the Selenium 3 spelling; Selenium 4 renamed them to
# ``options=`` — confirm the pinned selenium version before upgrading.
if 'linux' in platform.platform().lower():
    opts = webdriver.FirefoxOptions()
    opts.add_argument('--headless')

    binary = FirefoxBinary('/usr/local/firefox/firefox')
    # NOTE(review): ``version``/``executable_path`` are computed here but
    # never passed to webdriver.Firefox — dead on this branch.
    version = 'chromedriver_linux'
    executable_path = os.getcwd().replace('lagou', 'libs/' + version)

    driver = webdriver.Firefox(firefox_options = opts, firefox_binary = binary)
    driver2 = webdriver.Firefox(firefox_options = opts, firefox_binary = binary)
    driver3 = webdriver.Firefox(firefox_options = opts, firefox_binary = binary)
else:
    chrome_options = Options()
    chrome_options.add_argument('--headless')

    # chromedriver is expected in a sibling ``libs/`` directory of the
    # project root (the cwd is assumed to end in ``lagou``).
    version = 'chromedriver'
    executable_path = os.getcwd().replace('lagou', 'libs/' + version)

    driver = webdriver.Chrome(executable_path = executable_path, chrome_options = chrome_options)
    driver2 = webdriver.Chrome(executable_path = executable_path, chrome_options = chrome_options)
    driver3 = webdriver.Chrome(executable_path = executable_path, chrome_options = chrome_options)

# Warm up every driver on the site root (presumably to pick up first-party
# cookies before hitting the filtered search URLs — confirm).
driver.get('https://www.lagou.com/')
driver2.get('https://www.lagou.com/')
driver3.get('https://www.lagou.com/')

# ``connect=False`` defers the MongoDB connection until first use, which is
# required when the client is later shared across forked processes.
client = MongoClient('127.0.0.1', 27017, connect = False)

# Master database/collection of job positions.
knx_all_position_db = client['knx_all_position_db']
knx_all_position_coll = knx_all_position_db['knx_all_position_coll']

# Master database/collection of enterprises (companies).
knx_all_enterprise_db = client['knx_all_enterprise_db']
knx_all_enterprise_coll = knx_all_enterprise_db['knx_all_enterprise_coll']


def comp_id():
    """Generate a random 30-character alphanumeric company ID.

    Returns:
        str: 30 characters drawn uniformly, with replacement, from
        ``[A-Za-z0-9]``.
    """
    seq = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    # random.choices samples with replacement, which is exactly what the
    # original per-character random.sample(seq, 1) loop did — and it avoids
    # shadowing the builtin ``id``.
    return ''.join(random.choices(seq, k = 30))


class LAGOU_POST():
    """Crawler for lagou.com job postings.

    For every search keyword in ``menus`` it loads filtered job-list pages
    with the module-level ``driver``, follows each posting with ``driver2``,
    and stores postings and their companies (fetched with ``driver3``) into
    the module-level MongoDB collections. The whole crawl runs as a side
    effect of instantiation via :meth:`parse_index`.
    """

    def __init__(self, menus):
        # Placeholder cookie jar; never populated by the visible code.
        self.cookie = dict()
        # List of search keywords to crawl.
        self.menus = menus
        # NOTE: kicks off the full crawl immediately on construction.
        self.parse_index()

    def login(self):
        """Authentication placeholder; currently a no-op."""
        pass

    def parse_index(self):
        """Crawl two filtered result lists per keyword.

        The first URL is restricted to campus hiring (``isSchoolJob=1``);
        the second to candidates with at most 3 years of experience. Both
        filter on bachelor/master/PhD education, round-C+/listed/
        self-funded companies, and all cities (percent-encoded query args).
        """
        for menu in self.menus:
            try:
                url_for_school = 'https://www.lagou.com/jobs/list_' + menu + '?px=default&gx=%E5%85%A8%E8%81%8C&gj=&xl=%E6%9C%AC%E7%A7%91,%E7%A1%95%E5%A3%AB,%E5%8D%9A%E5%A3%AB&jd=C%E8%BD%AE,D%E8%BD%AE%E5%8F%8A%E4%BB%A5%E4%B8%8A,%E4%B8%8A%E5%B8%82%E5%85%AC%E5%8F%B8,%E4%B8%8D%E9%9C%80%E8%A6%81%E8%9E%8D%E8%B5%84&isSchoolJob=1&city=%E5%85%A8%E5%9B%BD#filterBox'
                url_for_social = 'https://www.lagou.com/jobs/list_' + menu + '?px=default&gj=3%E5%B9%B4%E5%8F%8A%E4%BB%A5%E4%B8%8B&xl=%E6%9C%AC%E7%A7%91,%E7%A1%95%E5%A3%AB,%E5%8D%9A%E5%A3%AB&jd=C%E8%BD%AE,D%E8%BD%AE%E5%8F%8A%E4%BB%A5%E4%B8%8A,%E4%B8%8A%E5%B8%82%E5%85%AC%E5%8F%B8,%E4%B8%8D%E9%9C%80%E8%A6%81%E8%9E%8D%E8%B5%84&city=%E5%85%A8%E5%9B%BD#filterBox'

                for url in [url_for_school, url_for_social]:
                    driver.get(url)
                    time.sleep(5)  # wait for the JS-rendered result list
                    self.search_position()
            except:
                # Best-effort crawl: any failure skips this keyword.
                continue

    def search_position(self):
        """Scrape every job link on the current result page, then recurse
        onto the next page until the pager's "next" button is disabled."""
        page_source = driver.page_source
        # NOTE(review): no explicit parser argument — BeautifulSoup picks
        # whichever parser is installed and emits a warning.
        soup = BeautifulSoup(page_source)

        for item in soup.select('.item_con_list li'):
            self.save_position(item.find(class_ = 'position_link').get('href'))
            time.sleep(random.random())  # jitter between requests

        time.sleep(3)

        # Scroll to the bottom so the pager element is rendered/clickable.
        try:
            js = "document.documentElement.scrollTop=10000"
            driver.execute_script(js)
        except:
            pass

        # Disabled "next" button means this was the last page.
        try:
            if driver.find_element_by_class_name('pager_next_disabled'):
                return
        except:
            pass

        # Otherwise click "next" and recurse. Note the trailing space in
        # 'pager_next ' matches the site's literal class attribute.
        try:
            if driver.find_element_by_class_name('pager_next '):
                driver.find_element_by_class_name('pager_next ').click()
                time.sleep(2)
                self.search_position()
        except:
            pass

    def save_position(self, url):
        """Fetch one job-detail page and insert it into the positions
        collection unless an identical (name, company, location) posting
        already exists; on insert, also crawl the posting's company.

        Args:
            url: Absolute URL of the job-detail page.
        """
        driver2.get(url)
        time.sleep(1)

        soup = BeautifulSoup(driver2.page_source)

        # Every field below is extracted best-effort: a missing element
        # falls back to an empty default instead of aborting the posting.

        try:
            place = soup.find(class_ = 'work_addr').get_text().replace(' ', '').replace('查看地图', '').strip()
        except:
            place = ''

        try:
            description = soup.find(class_ = 'job_bt').get_text().strip()
        except:
            description = ''

        try:
            welfare = soup.find(class_ = 'position-label').get_text().strip().split()
        except:
            welfare = []

        try:
            industry = soup.find(class_ = 'icon-glyph-fourSquare').parent.get_text().replace('领域', '').strip()
        except:
            industry = ''

        try:
            company = soup.find(class_ = 'job_company').find('h2').text.replace('拉勾认证企业', '').replace('拉勾未认证企业', '').strip()
        except:
            company = ''

        try:
            name = soup.find(class_ = 'job-name').get('title')
        except:
            name = ''

        try:
            date = soup.find(class_ = 'publish_time').text.split()[0]

            # A "HH:MM" publish time means "posted today" — substitute
            # today's date for it.
            if len(date.split(':')) == 2:
                date = datetime.datetime.now().strftime('%Y-%m-%d')
        except:
            date = ''

        try:
            salary = soup.find(class_ = 'salary').text

            # "Nk-Mk" salaries are normalised to integer yuan ([N*1000,
            # M*1000]); otherwise the raw string is kept.
            if salary.find('k') != -1:
                salary = (np.array(re.findall('\d+', salary), dtype = int) * 1000).tolist()
        except:
            salary = []

        try:
            # NOTE(review): the fallback for ``edu`` is a list while the
            # success path yields a string — inconsistent default types.
            edu = soup.find(class_ = 'job_request').get_text().split('/')[3].strip().replace('及以上', '')
        except:
            edu = []

        try:
            exp = soup.find(class_ = 'job_request').get_text().split('/')[2]
            exp = np.array(re.findall('\d+', exp), dtype = int).tolist()
        except:
            exp = []

        try:
            location = soup.find(class_ = 'job_request').get_text().split('/')[1]
        except:
            location = ''

        try:
            logoUrl = 'http:' + soup.find(class_ = 'job_company').find('dt').find('a').find('img').get('src')
        except:
            logoUrl = ''

        # Document shape shared with the other crawlers feeding this
        # collection; ``platform: 2`` presumably tags lagou as the source —
        # confirm against the other spiders.
        item = {
            'url': url,
            'type': '',
            'name': name,
            'salary': salary,
            'location': location,
            'count': '',
            'exp': exp,
            'edu': edu,
            'date': date,
            'lang': '',
            'welfare': welfare,
            'description': description,
            'place': place,
            'company': company,
            'compId': '',
            'ranking_score': -1,
            'logoUrl': logoUrl,
            'major': '',
            'funType': '',
            'industry': industry,
            'platform': 2,
            'compsize': '',
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # A missing job name means the page did not render a posting
        # (anti-bot page, 404, etc.) — skip it.
        if item['name'] == '':
            print('页面返回错误')
            return

        # Dedupe on (name, company, location) before inserting.
        if not knx_all_position_coll.find_one({'name': item['name'], 'company': item['company'], 'location': item['location']}):
            knx_all_position_coll.insert_one(item)
            print(item['company'], item['name'], 'inserted [message from lagou]')

            # NOTE(review): looked up by id='job_company' here but by
            # class_='job_company' above; if the id variant is absent this
            # raises and the error bubbles up to parse_index — verify.
            companyName = soup.find(id = 'job_company').find('h2').get_text().replace('拉勾认证企业', '').replace('拉勾未认证企业', '').strip()
            companyId = soup.find(id = 'companyid').get('value')

            self.save_enterprise(companyId, companyName)
        else:
            print(item['company'], item['name'], 'existed [message from lagou]')

    def save_enterprise(self, companyId, companyName):
        """Fetch a company profile page and insert it into the enterprise
        collection unless a company with the same name already exists.

        Args:
            companyId: Numeric lagou company id (used to build the URL).
            companyName: Display name, used only for the pre-fetch
                duplicate check.
        """
        if knx_all_enterprise_coll.find_one({'name': companyName}):
            return

        url = 'https://www.lagou.com/gongsi/' + str(companyId) + '.html'
        driver3.get(url)
        time.sleep(2)

        # Presumably expands the collapsed company introduction (the text
        # removed below contains the "collapse" label) — confirm.
        try:
            driver3.find_element_by_class_name('text_over').click()
        except:
            pass

        soup = BeautifulSoup(driver3.page_source)

        # Best-effort field extraction, as in save_position.

        try:
            name = soup.find(class_ = 'company_info').find('h1').get_text().strip()
        except:
            name = ''

        try:
            # The src attribute is protocol-relative ("//..."); prefix it.
            logoUrl = 'http://' + soup.find(class_ = 'top_info_wrap').find('img').get('src')[2:]
        except:
            logoUrl = ''

        try:
            description = soup.find(id = 'company_intro').find(class_ = 'item_content').get_text().strip().replace('收起', '')
        except:
            description = ''

        try:
            industry = soup.find(id = 'basic_container').find(class_ = 'type').find_next_sibling().get_text()
        except:
            industry = ''

        try:
            # NOTE(review): ``type`` shadows the builtin within this method.
            type = soup.find(id = 'basic_container').find(class_ = 'process').find_next_sibling().get_text()
        except:
            type = ''

        try:
            # Headcount range, e.g. "50-150人" -> [50, 150].
            size = soup.find(id = 'basic_container').find(class_ = 'number').find_next_sibling().get_text()
            size = re.findall('\d+', size)
            size = np.array(size, dtype = 'int').tolist()
        except:
            size = []

        try:
            location = soup.find(id = 'basic_container').find(class_ = 'address').find_next_sibling().get_text()
        except:
            location = ''

        item = {
            'name': name,
            'type': type,
            'site': '',
            'url': url,
            'size': size,
            'logoUrl': logoUrl,
            'industry': industry,
            'location': location,
            'platform': 2,
            'compId': comp_id(),
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # Re-check against the freshly scraped name (it may differ from the
        # ``companyName`` checked on entry).
        if not knx_all_enterprise_coll.find_one({'name': name}):
            knx_all_enterprise_coll.insert_one(item)
            print(name, '[message from lagou]')

    def parse_salary(self, salary):
        """Normalise a salary string containing ``元/月`` (yuan per month).

        NOTE(review): unused by the visible code, and multiplying a figure
        already expressed in yuan/month by 1000 looks wrong (the ×1000 in
        save_position converts "k" figures) — confirm intent before use.
        """
        if salary.find('元/月') != -1:
            salary = (np.array(re.findall('\d+', salary), dtype = int) * 1000).tolist()
            return salary

        return salary


def start(menus):
    """Entry point for one crawl run.

    Constructing the crawler starts scraping immediately (its __init__
    calls parse_index), so nothing needs to be returned or kept.
    """
    LAGOU_POST(menus = menus)


if __name__ == '__main__':
    # Earlier approach, kept for reference: derive the keyword list from
    # the live category menus of lagou.com and zhipin.com.
    # lagou_menus = [i.get_text().lower() for i in BeautifulSoup(requests.get('https://www.lagou.com/').text).find(class_ = 'mainNavs').findAll('a')]
    # boss_menus = [i.get_text().lower() for i in BeautifulSoup(requests.get('https://www.zhipin.com/?ka=header-home').text, 'lxml').select('.job-menu a')]
    # menus = set(lagou_menus + boss_menus)

    # Hard-coded search keywords targeting internships and new-graduate
    # hiring: internship/trainee terms, campus-recruiting seasons, and the
    # 2018/2019 graduating classes.
    menus = [
        "实习",
        "实习生",
        "见习",
        "校招",
        "秋招",
        "夏招",
        "冬招",
        "春招",
        "毕业",
        "毕业生",
        "应届",
        "应届生",
        "2018届",
        "2019届",
        "2019年",
        "2018年",
        "管培生",
        "培训生",
        "19年",
        "18年",
        "18届",
        "19届"
    ]

    start(menus)
