# -*- coding: utf-8 -*-
import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')

from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

import numpy as np
import requests
import datetime
import pymongo
import random
import codecs
import json
import pprint
import time
import ssl
import os
import re

# Force UTF-8 output even when the terminal/locale advertises another
# encoding, so printing CJK job/company names doesn't raise
# UnicodeEncodeError.  'strict' keeps genuine encoding bugs visible.
if sys.stdout.encoding != 'UTF-8':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
if sys.stderr.encoding != 'UTF-8':
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')


# Master job-position store (currently disabled)
# knx_all_position_db = client['knx_all_position_db']
# knx_all_position_coll = knx_all_position_db['knx_all_position_coll']
#
# # Master enterprise store (currently disabled; NOTE: scrapy_company still
# # references knx_all_enterprise_coll and would raise NameError if called)
# knx_all_enterprise_db = client['knx_all_enterprise_db']
# knx_all_enterprise_coll = knx_all_enterprise_db['knx_all_enterprise_coll']


def comp_id():
    """Return a random 30-character alphanumeric company identifier.

    Characters are drawn uniformly with replacement from [A-Za-z0-9],
    matching the original per-character ``random.sample(seq, 1)`` behaviour.
    (Fixes shadowing of the builtin ``id`` and collapses the manual
    accumulate loop into a single ``random.choices`` call.)
    """
    seq = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join(random.choices(seq, k = 30))


import jieba.posseg as pseg


def token(text):
    """Tokenise *text* (lower-cased) with jieba POS tagging.

    Drops tokens tagged as punctuation/particle/numeral/conjunction
    ('x', 'uj', 'm', 'c') as well as single-character tokens, and returns
    the remaining (token, pos) pairs in order.
    """
    skipped_tags = {'x', 'uj', 'm', 'c'}
    pairs = []
    for word, tag in pseg.lcut(text.lower()):
        if tag in skipped_tags or len(word) <= 1:
            continue
        pairs.append((word, tag))
    return pairs


# Browser-mimicking request headers.  Host/Referer are hard-coded per
# endpoint and some are mutated per-request by the crawler (these dicts
# are shared module-level state, not per-instance).

# Company listing pages on company.51job.com (Referer rewritten per page).
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'company.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://company.51job.com/p1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Company detail pages on jobs.51job.com (used by parse_hidTotal).
headers2 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://company.51job.com/p3',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# AJAX (form-encoded POST) job-list pagination requests (scrapy_position).
headers3 = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Host': 'jobs.51job.com',
    'Origin': 'https://jobs.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://jobs.51job.com/all/co2608550.html',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}

# Job detail pages on jobs.51job.com.
# NOTE(review): defined but not referenced anywhere in this file — confirm
# whether save_position was meant to use it (see note there).
headers4 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Company profile pages fetched by scrapy_company (newer Chrome UA string).
headers5 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
}


class QIANCHENG_POST():
    """Crawler for 51job (前程无忧).

    Walks the company listing pages ``https://company.51job.com/p<N>`` from
    ``end`` down toward ``start``, then crawls each company's paginated job
    list and stores previously-unseen postings in the MongoDB collection
    ``qushu_qiancheng_position``.

    NOTE(review): depends on module-level globals created in ``__main__``
    (``qushu_qiancheng_position``; and ``knx_all_enterprise_coll``, whose
    definition is commented out at the top of the file) — these are
    inherited by forked worker processes.
    """

    def __init__(self, start, end):
        # This worker is responsible for listing pages in (start, end].
        self.end = end
        self.start = start
        self.cookie = dict()  # session cookies, merged in after each response
        self.comp_list_page = 1  # last company-list page visited
        self.job_list_page = 1
        self.cur_company = ''
        self.base_url = 'https://company.51job.com/p'

        self.shielding_words = []  # blacklist used by contain_shielding_words()

        # NOTE(review): constructing the object immediately starts the
        # (long-running, blocking) crawl as a side effect.
        self.scrapy_company_list()

    def scrapy_company_list(self):
        """Iterate company listing pages from ``end`` down to ``start + 1``.

        NOTE(review): ``range(end, start, -1)`` never yields page ``start``
        itself, so with the (1, 50) worker page 1 is never fetched — confirm
        this boundary partitioning is intentional.
        """
        for p in range(self.end, self.start, -1):
            url = self.base_url + str(p)
            headers['Referer'] = url  # mutates the shared module-level dict

            try:
                r = requests.get(url, headers = headers, cookies = self.cookie)
            except:
                continue  # network failure: skip this listing page entirely

            # Server sends GBK content without a charset header, so requests
            # decodes it as iso-8859-1; re-encode to the raw bytes and decode
            # as GBK.  The trailing utf8 encode/decode round-trip is a no-op.
            page_source = r.text.encode("iso-8859-1").decode('gbk').encode('utf8').decode('utf8')
            soup = BeautifulSoup(page_source, 'lxml')

            self.cookie.update(r.cookies)
            self.comp_list_page = p

            # Each '.c2-t' row is one company entry on the listing page.
            for comp in soup.select('.c2-main .c2-t'):
                link = comp.find(class_ = 's1').find('a').get('href')
                name = comp.find(class_ = 's1').find('a').get('title')
                type = comp.find(class_ = 's2').get_text()  # NOTE: shadows builtin `type`
                size = comp.find(class_ = 's3').get_text()
                city = comp.find(class_ = 's4').get_text()
                industry = comp.find(class_ = 's5').get_text()

                # Skip anonymised listings ('某' ~ "a certain <company>").
                if name.find('某') != -1:
                    continue

                item = {
                    'name': name,
                    'link': link,
                    'type': type,
                    'size': size,
                    'city': city,
                    'Referer': url,
                    'industry': industry
                }

                # self.scrapy_company(link, item, url)
                self.scrapy_position(item)

    def scrapy_position(self, comp):
        """Page through a company's job list via the AJAX POST endpoint.

        *comp* is the dict built in scrapy_company_list.  Stops when the
        'hidTotal' token cannot be obtained or a page returns no '.el' rows.
        """
        hidTotal = self.parse_hidTotal(comp)

        if not hidTotal:
            return

        # Hard safety cap of round(100000 / 20) - 1 = 4999 pages per company.
        for pageno in range(1, round(100000 / 20)):
            headers3['Referer'] = comp['link']  # mutates shared module-level dict

            try:
                r = requests.post(comp['link'], data = {'hidTotal': hidTotal, 'type': 'undefined', 'code': 'undefined',
                                                        'pageno': pageno}, cookies = self.cookie, headers = headers3)
            except:
                continue  # network failure: retry-by-skipping this page number

            soup = BeautifulSoup(r.text, 'lxml')

            # An empty page means we ran past the last job-list page.
            if len(soup.select('.el')) == 0:
                return

            for job in soup.select('.el'):
                link = job.find(class_ = 't1').find('a').get('href')
                name = job.find(class_ = 't1').find('a').get('title').strip()
                info = job.find(class_ = 't2').text.strip()
                location = job.find(class_ = 't3').text.strip()
                salary = job.find(class_ = 't4').text.strip()
                date = job.find(class_ = 't5').text.strip()

                # if len(date.split('-')) == 2:
                #     month = int(date.split('-')[0])
                #     day = int(date.split('-')[1])

                # if (datetime.datetime.now() - datetime.datetime(2018, month, day)).days >= 15:
                #     continue

                # Dedupe on (name, location, company) before fetching details.
                if qushu_qiancheng_position.find_one({'name': name, 'location': location, 'company': comp['name']}, no_cursor_timeout = True):
                    continue

                salary = self.parse_salary(salary)
                self.save_position(link, comp, salary, date, location)

            self.cookie.update(r.cookies)

    def save_position(self, url, comp, salary, date, location):
        """Fetch one job-detail page and insert it into MongoDB if unseen.

        Each field below is extracted best-effort: a bare except leaves the
        field at its empty default rather than aborting the whole posting.
        """
        # Only jobs.51job.com detail pages are handled.
        if url.find('jobs.51job.com') == -1:
            return

        try:
            # NOTE(review): uses `headers` whose Host is company.51job.com even
            # though this URL is on jobs.51job.com — `headers4` looks like the
            # intended dict; confirm.
            r = requests.get(url, headers = headers)
        except:
            return

        page_source = r.text
        soup = BeautifulSoup(page_source, 'lxml')

        try:
            name = soup.find(class_ = 'tHjob').find('h1').text.strip()
        except:
            name = ''

        try:
            company = soup.find(class_ = 'cname').find('a').text.strip()
        except:
            company = ''

        # try:
        #     location = soup.find(class_ = 'lname').text.strip()
        # except:
        #     location = ''

        # try:
        #     salary = self.parse_salary(soup.find(class_ = 'lname').find_next_sibling().text.strip())
        # except:
        #     salary = []

        try:
            description = soup.find(class_ = 'job_msg').text.strip()

            # Trim the trailing "job category" (职能类别) section off the text.
            if description.find('职能类别') != -1:
                description = description[:description.find('职能类别')].strip()
        except:
            description = ''

        try:
            # Years-of-experience requirement, e.g. "3-5年经验" -> [3, 5].
            exp = re.findall('([^|\s]+)年经验', soup.select('.tHeader.tHjob .msg.ltype')[0].get_text().strip())

            if len(exp) and '-' in exp[0]:
                exp = exp[0].split('-')

            exp = np.array(exp, dtype = int).tolist()
        except:
            exp = []

        try:
            # First matching education level (junior high ... doctorate) wins.
            edu = ''

            for i in ['初中', '中专', '大专', '高中', '本科', '研究生', '硕士', '博士']:
                if i in soup.find(class_ = 'msg ltype').text.strip():
                    edu = i
                    break
        except:
            edu = ''

        try:
            # Headcount to hire; strips every non-digit character.
            # NOTE(review): on failure this defaults to '' (str), not an int.
            count = int(re.sub("\D", "", soup.select('.jtag.inbox .t1 .i3')[0].parent.get_text()))
        except:
            count = ''

        try:
            lang = soup.select('.jtag.inbox .t1 .i5')[0].parent.get_text()
        except:
            lang = ''

        try:
            major = soup.select('.jtag.inbox .t1 .i6')[0].parent.get_text()
        except:
            major = ''

        try:
            welfare = [t.text for t in soup.find(class_ = 'jtag').select('span')]
        except:
            welfare = []

        try:
            # Job function/category, labelled "职能类别" on the page.
            funType = soup.find(text = re.compile('职能类别')).parent.find_next_sibling().text
        except:
            funType = ''

        try:
            # Work address, labelled "上班地址"; value follows the '：' colon.
            place = soup.find(text = re.compile('上班地址')).parent.parent.get_text().strip().split('：')[1]
        except:
            place = ''

        try:
            industry = soup.find(class_ = 'com_tag').select('p')[2].get_text()
        except:
            industry = ''

        try:
            logoUrl = 'https:' + soup.find(class_ = 'himg').find('img').get('src')
        except:
            logoUrl = ''

        item = {
            'url': url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'logoUrl': logoUrl,
            'compId': '',
            'industry': industry,
            'ranking_score': -1,
            'platform': 4,  # source platform id used for 51job in this item schema
            'compsize': comp['size'],
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # A posting without a name or a description is useless — drop it.
        if len(item['name']) == 0 or len(item['description']) == 0:
            return

        # Second dedupe pass keyed on the freshly-parsed detail-page fields.
        if not qushu_qiancheng_position.find_one({'name': name, 'company': company, 'location': location}, no_cursor_timeout = True):
            pprint.pprint(item)
            item['token'] = token(item['description'])
            qushu_qiancheng_position.insert_one(item)

        self.cookie.update(r.cookies)

    def scrapy_company(self, link, item, url):
        """Fetch and store one company profile page.

        NOTE(review): currently unused (its call site in scrapy_company_list
        is commented out) and it references ``knx_all_enterprise_coll``, whose
        definition is commented out at the top of the file — calling it as-is
        would raise NameError.
        """
        if knx_all_enterprise_coll.find_one({'name': item['name']}):
            return

        if link.find('jobs.51job.com') == -1:
            return

        try:
            headers5['Referer'] = url  # mutates shared module-level dict
            r = requests.get(link, headers = headers5, timeout = 5)
        except:
            return

        try:
            soup = BeautifulSoup(r.text, 'lxml')

            if not soup:
                return
        except:
            return

        try:
            name = soup.find('h1').text.strip()
        except:
            name = ''

        # The '.ltype' header line is whitespace-split; fields sit at fixed
        # positions: [0] company type, [2] size, [4] industry.
        try:
            type = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[0]
        except:
            type = ''

        try:
            size = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[2]
        except:
            size = ''

        try:
            industry = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[4]
        except:
            industry = ''

        try:
            description = soup.find(class_ = 'con_txt').get_text().strip()
        except:
            description = ''

        try:
            # Company address, labelled "公司地址" on the page.
            location = soup.find(text = re.compile('公司地址')).parent.parent.get_text().replace('公司地址：',
                                                                                             '').strip().replace(' ',
                                                                                                                 '')
        except:
            location = ''

        try:
            # Company website, labelled "公司官网" on the page.
            site = soup.find(text = re.compile('公司官网：')).parent.parent.get_text().replace('公司官网：', '').strip()
        except:
            site = ''

        try:
            logoUrl = 'https:' + soup.find(class_ = 'cimg').get('src')
        except:
            logoUrl = ''

        item = {
            'name': name,
            'type': type,
            'site': site,
            'url': link,
            'size': size,
            'logoUrl': logoUrl,
            'industry': industry,
            'location': location,
            'platform': 4,
            'compId': comp_id(),
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        if not knx_all_enterprise_coll.find_one({'name': name}, no_cursor_timeout = True):
            knx_all_enterprise_coll.insert_one(item)
            print(name)

        return True

    def parse_hidTotal(self, comp):
        """Return the hidden 'hidTotal' token from a company's job-list page.

        The token is required by the AJAX pagination POST in scrapy_position.
        Returns '' on network failure or when the element is absent.
        """
        headers2['Referer'] = comp['Referer']  # mutates shared module-level dict

        try:
            r = requests.get(comp['link'], headers = headers2, cookies = self.cookie, timeout = 5)
        except:
            return ''

        page_source = r.text
        soup = BeautifulSoup(page_source, 'lxml')

        self.cookie.update(r.cookies)

        if soup.find(id = 'hidTotal'):
            return soup.find(id = 'hidTotal').get('value')
        else:
            return ''

    def parse_salary(self, salary):
        """Normalise a 51job salary string to a [low, high] list in RMB/month.

        Unit conversions (千 = thousand, 万 = ten-thousand):
          "X-Y千/月"  -> x1000         "X-Y万/月" -> x10000
          "X-Y万/年"  -> /12 x 10000   "X-Y元/天" -> x30
        Anything unrecognised yields [].
        """
        if salary.find('千/月') != -1 or salary.find('千以下/月') != -1 or salary.find('千以上/月') != -1:
            salary = list(np.array(re.match('[\d\.\-]+', salary).group().split('-')).astype(float) * 1000)
        elif salary.find('万/月') != -1 or salary.find('万以上/月') != -1 or salary.find('万以下/月') != -1:
            salary = list(np.array(re.match('[\d\.\-]+', salary).group().split('-')).astype(float) * 10000)
        elif salary.find('万/年') != -1 or salary.find('万以上/年') != -1 or salary.find('万以下/年') != -1:
            salary = list(np.array(re.match('[\d\.\-]+', salary).group().split('-')).astype(float) / 12 * 10000)
        elif salary.find('元/天') != -1 or salary.find('元以上/天') != -1 or salary.find('元以下/天') != -1:
            salary = list(np.array(re.match('[\d\.\-]+', salary).group().split('-')).astype(float) * 30)
        else:
            salary = []

        return salary

    def contain_shielding_words(self, name):
        """Return True if *name* contains any blacklisted substring."""
        for i in self.shielding_words:
            if i in name:
                return True

        return False


def start(begin, end):
    """Worker-process entry point.

    Instantiating QIANCHENG_POST immediately begins crawling company
    listing pages in the (begin, end] range (the constructor starts the
    crawl as a side effect).
    """
    crawler = QIANCHENG_POST(begin, end)


if __name__ == '__main__':
    from optparse import OptionParser

    parser = OptionParser(usage = "%prog [options]")
    parser.add_option("-e", "--network", action = "store", type = "str", dest = "address",
                      help = "the address of mongodb", default = 'localhost')

    options, args = parser.parse_args()

    # connect=False defers the actual connection until first use, which is
    # required for fork-safety with multiprocessing workers.
    client = MongoClient(options.address, 27017, connect = False)

    qushu_qiancheng_db = client['qushu_qiancheng_db']
    qushu_qiancheng_position = qushu_qiancheng_db['qushu_qiancheng_position']

    # One worker per page range; each crawls listing pages (lo, hi].
    # (Replaces nine copy-pasted Process/start/join triplets with a loop;
    # start-all-then-join-all ordering is preserved.)
    page_ranges = [
        (1, 50), (50, 100), (100, 300), (300, 600), (600, 900),
        (900, 1300), (1300, 1900), (1900, 2300), (2300, 2996),
    ]

    workers = [Process(target = start, args = bounds) for bounds in page_ranges]

    for w in workers:
        w.start()

    for w in workers:
        w.join()