import datetime
import pprint
import queue
import random
import re
import time
from multiprocessing import Process

import numpy as np
import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient
import jieba.posseg as pseg
from selenium import webdriver

# Shared MongoDB client.  connect=False defers the actual socket connection
# until first use — required here because this client is created at import
# time and then inherited by forked worker processes (see __main__ below);
# PyMongo clients are not fork-safe once connected.
client = MongoClient('127.0.0.1', 27017, connect = False)

# Destination collection for scraped job postings.
qushu_qiancheng_db = client['qushu_qiancheng_db']
qushu_qiancheng_position = qushu_qiancheng_db['qushu_qiancheng_position']

# URLs confirmed dead on lagou.com ("你来晚了" pages) — never fetched again.
lagou_invalide_url_db = client['lagou_invalide_url_db']
lagou_invalide_url_coll = lagou_invalide_url_db['lagou_invalide_url_coll']

# URLs already scraped and stored — skipped on later runs.
lagou_visited_url_db = client['lagou_visited_url_db']
lagou_visited_url_coll = lagou_visited_url_db['lagou_visited_url_coll']


def token(text):
    """Segment *text* (lowercased) with jieba POS tagging.

    Keeps only tokens longer than one character whose POS tag is not one
    of the excluded tags ('x', 'uj', 'm', 'c').  Returns a list of
    (word, pos) pairs.
    """
    excluded = ('x', 'uj', 'm', 'c')
    pairs = []
    for word, pos in pseg.lcut(text.lower()):
        if pos in excluded or len(word) <= 1:
            continue
        pairs.append((word, pos))
    return pairs


def _extract(extractor, default):
    """Run *extractor* (a zero-arg callable navigating the parsed page) and
    return its value, falling back to *default* on any error.

    Lagou pages vary in structure, so missing nodes are expected rather
    than exceptional.  Catching Exception (not a bare except) keeps
    SystemExit / KeyboardInterrupt deliverable.
    """
    try:
        return extractor()
    except Exception:
        return default


def parse_position_content(page_source, url):
    """Parse one lagou.com job-detail page and store it in MongoDB.

    Returns None when *url* is already known (invalid or visited), False
    when no job name could be parsed, and 1 otherwise (stored, updated,
    or already present).
    """
    if lagou_invalide_url_coll.find_one({'url': url}, no_cursor_timeout = True) or lagou_visited_url_coll.find_one({'url': url}, no_cursor_timeout = True):
        return

    soup = BeautifulSoup(page_source, 'html.parser')

    place = _extract(lambda: soup.find(class_ = 'work_addr').get_text().replace(' ', '').replace('查看地图', '').strip(), '')
    description = _extract(lambda: soup.find(class_ = 'job_bt').get_text().strip(), '')
    welfare = _extract(lambda: soup.find(class_ = 'position-label').get_text().strip().split(), [])
    industry = _extract(lambda: soup.find(class_ = 'icon-glyph-fourSquare').parent.get_text().replace('领域', '').strip(), '')

    company = _extract(
        lambda: soup.find(class_ = 'job_company').find('h2').text
                    .replace('拉勾认证企业', '').replace('拉勾未认证企业', '').strip()
                    .replace('资质已认证', '').replace('资质未认证', '').strip(),
        '')
    full_company = _extract(
        lambda: soup.find(id = 'job_company').find('img').get('alt').replace('资质已认证', '').replace('资质未认证', '').strip(),
        '')
    name = _extract(lambda: soup.find(class_ = 'job-name').get('title').strip(), '')

    date = _extract(lambda: soup.find(class_ = 'publish_time').text.split()[0], '')
    if len(date.split(':')) == 2:
        # A relative time such as "11:30" means "published today".
        date = datetime.datetime.now().strftime('%Y-%m-%d')

    salary_text = _extract(lambda: soup.find(class_ = 'salary').text, None)
    if salary_text is None:
        salary = []
    elif 'k' in salary_text.lower():
        # "15k-25k" -> [15000, 25000].  lower() also accepts "15K-25K";
        # raw regex string avoids the invalid-escape deprecation warning.
        salary = (np.array(re.findall(r'\d+', salary_text), dtype = int) * 1000).tolist()
    else:
        # No 'k' marker: keep the raw text, matching historical records.
        salary = salary_text

    edu = _extract(lambda: soup.find(class_ = 'job_request').get_text().split('/')[3].strip().replace('及以上', ''), [])
    exp = _extract(lambda: [int(v) for v in re.findall(r'\d+', soup.find(class_ = 'job_request').get_text().split('/')[2])], [])
    location = _extract(lambda: soup.find(class_ = 'job_request').get_text().split('/')[1].strip(), '')
    logoUrl = _extract(lambda: 'http:' + soup.find(class_ = 'job_company').find('dt').find('a').find('img').get('src'), '')

    if not name:
        return False

    item = {
        'url': url,
        'type': '',
        'name': name,
        'salary': salary,
        'location': location,
        'count': '',
        'exp': exp,
        'edu': edu,
        'date': date,
        'lang': '',
        'welfare': welfare,
        'description': description,
        'place': place,
        'company': company,
        'full_company': full_company,
        'logoUrl': logoUrl,
        'major': '',
        'funType': '',
        'industry': industry,
        'platform': 'lagou',
        'compsize': '',
        'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }

    try:
        # re.escape: company names frequently contain regex metacharacters
        # (e.g. parentheses), which previously broke or mis-matched the query.
        item_found = qushu_qiancheng_position.find_one(
            {'name': name, 'company': re.compile(re.escape(company)), 'location': location},
            no_cursor_timeout = True)
    except Exception:
        return 1

    if not item_found:
        pprint.pprint(item)
        item['token'] = token(item['description'])
        qushu_qiancheng_position.insert_one(item)
        return 1

    if 'full_company' in item_found:
        lagou_visited_url_coll.insert_one({'url': url})
    else:
        # Backfill full_company on records stored before it was scraped.
        qushu_qiancheng_position.update_one({'_id': item_found['_id']}, {'$set': {'full_company': full_company}})
        print(name, company, location, '==>', full_company)
        lagou_visited_url_coll.insert_one({'url': url})

    return 1


def new_driver(headless = False):
    """Create and return a maximized Chrome WebDriver.

    When *headless* is true, Chrome runs without a visible window.
    """
    opts = webdriver.ChromeOptions()
    if headless:
        opts.add_argument('--headless')
    opts.add_argument('--start-maximized')
    return webdriver.Chrome(options = opts)


def restart_router():
    """Log in to the router admin page and trigger a reboot.

    Used to force a new WAN IP after lagou.com blocks the current one.
    """
    # Selenium 4 removed the find_element_by_* helpers (the file already
    # uses the Selenium-4 `options=` constructor), so use By locators.
    from selenium.webdriver.common.by import By

    # SECURITY: router credentials are hard-coded below; move them to
    # environment variables or a config file kept out of version control.
    driver = new_driver()
    driver.get('http://192.168.1.1/cgi-bin/luci')

    username = driver.find_element(By.CSS_SELECTOR, '#login_username')
    username.clear()
    username.send_keys('useradmin')

    password = driver.find_element(By.CSS_SELECTOR, '#login_password')
    password.clear()
    password.send_keys('vdhfd')

    driver.find_element(By.CSS_SELECTOR, '#login_form button').click()
    time.sleep(5)  # wait for the admin dashboard to load

    driver.find_element(By.CSS_SELECTOR, '#menu_action_restart_hint').click()
    time.sleep(2)
    driver.find_element(By.CSS_SELECTOR, '#pop_window_option #confirm').click()
    time.sleep(3)  # give the reboot command time to be accepted
    driver.quit()


def start(begin, end, proc_idx):
    """Worker loop: scrape random lagou.com job ids in [begin, end].

    Runs in its own process.  Only the worker with proc_idx == 0 reboots
    the router (to rotate the WAN IP) when the crawler is blocked; the
    others just back off and wait.
    """
    driver = new_driver(headless = True)
    fetched = 0
    limit = 2000000

    for _ in range(10000000000):
        if fetched >= limit:
            break

        url = 'https://www.lagou.com/jobs/' + str(random.randint(begin, end)) + '.html'

        # Skip ids already known to be dead or already stored.
        seen = lagou_invalide_url_coll.find_one({'url': url}) or lagou_visited_url_coll.find_one({'url': url})
        if seen:
            print('%s continued' % url)
            continue

        try:
            driver.get(url)
        except:
            # Network trouble: recycle the browser, and on worker 0
            # also reboot the router before the long back-off.
            driver.quit()
            if proc_idx == 0:
                print('网络出现故障,正在重启路由器......')
                restart_router()
            time.sleep(300)
            driver = new_driver(headless = True)
            continue

        if 'login.html' in driver.current_url:
            # Redirected to the login page — a fresh session usually recovers.
            driver.quit()
            driver = new_driver(headless = True)
            try:
                driver.get(url)
            except:
                time.sleep(5)
                continue
            time.sleep(2)

        page = driver.page_source

        if '您已被封禁' in page or '当前账号存在异常，请验证后继续访问' in page:
            # Banned: drop the session, optionally reboot the router, back off.
            driver.quit()
            if proc_idx == 0:
                print('您已被封禁,正在重启路由器......')
                restart_router()
            time.sleep(300)
            driver = new_driver(headless = True)
            continue

        if '你来晚了' in page or '您访问的链接' in page:
            # Posting no longer exists — remember it so this id is never retried.
            lagou_invalide_url_coll.insert_one({'url': url})
            continue

        parse_position_content(driver.page_source, url)
        fetched += 1

    driver.quit()


if __name__ == '__main__':
    # Split the ~6M-wide job-id space evenly across worker processes.
    proc_count = 15
    seg = 6000000 // proc_count  # ids per worker (floor division, no float round-trip)

    proc_list = [
        Process(target = start, args = (seg * i, seg * (i + 1), i))
        for i in range(proc_count)
    ]

    for p in proc_list:
        p.start()

    # Block until every worker finishes (each stops at its fetch limit).
    for p in proc_list:
        p.join()
