# -*- coding: utf-8 -*-
import sys

sys.path.append('/root/anaconda3/lib/python3.6/site-packages')

from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

import numpy as np
import requests
import datetime
import pymongo
import random
import codecs
import json
import pprint
import time
import ssl
import os
import re

# Force UTF-8 on stdout/stderr so Chinese text prints correctly regardless
# of the terminal locale.
if sys.stdout.encoding != 'UTF-8':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
if sys.stderr.encoding != 'UTF-8':
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')

# connect=False defers the actual connection until first use -- required
# because this client is shared across the fork()ed worker processes below.
client = MongoClient('127.0.0.1', 27017, connect = False)

# Master database of job postings
knx_all_position_db = client['knx_all_position_db']
knx_all_position_coll = knx_all_position_db['knx_all_position_coll']

# Master database of enterprises
knx_all_enterprise_db = client['knx_all_enterprise_db']
knx_all_enterprise_coll = knx_all_enterprise_db['knx_all_enterprise_coll']


def comp_id():
    """Return a random 30-character company identifier.

    Characters are drawn uniformly and independently from A-Z, a-z, 0-9.
    Not cryptographically secure -- uses ``random``, which is fine for a
    non-security internal ID.
    """
    seq = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'

    # random.choices(k=30) makes 30 independent uniform picks -- same
    # distribution as the original loop of random.sample(seq, 1) calls,
    # without shadowing the builtin `id`.
    return ''.join(random.choices(seq, k = 30))


# Browser-mimicking headers for company-list pages (company.51job.com).
# 'Referer' is rewritten per request inside the scraper.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'company.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://company.51job.com/p1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Headers for company-detail pages on jobs.51job.com (GET).
headers2 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://company.51job.com/p3',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Headers for the AJAX job-list endpoint (form-encoded POST,
# X-Requested-With marks it as an XHR call).
headers3 = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Host': 'jobs.51job.com',
    'Origin': 'https://jobs.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://jobs.51job.com/all/co2608550.html',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}

# Headers for job-detail pages (no Referer -- presumably a direct visit).
headers4 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Alternate headers for scrapy_company (different Chrome build string;
# 'Referer' is rewritten per request).
headers5 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
}


class QIANCHENG_POST():
    """Scraper for 51job ("qiancheng") companies and their job postings.

    Walks company-list pages ``start`` .. ``end`` - 1, inserting previously
    unseen companies into ``knx_all_enterprise_coll`` and qualifying job
    postings into ``knx_all_position_coll``.  Scraping runs as a side effect
    of construction.
    """

    def __init__(self, start, end):
        """Build the scraper and immediately crawl pages [start, end)."""
        self.end = end
        self.start = start
        self.cookie = dict()        # session cookies, refreshed after each request
        self.comp_list_page = 1     # company-list page currently being crawled
        self.job_list_page = 1
        self.cur_company = ''
        self.base_url = 'https://company.51job.com/p'

        # Job titles containing any of these words are treated as spammy /
        # low-quality postings and skipped.
        self.shielding_words = [
            '0基础', '0经验', '零经验', '零基础', '包吃', '包住', '包食', '包宿', '包三餐', '出纳', '收银员', '底薪',
            '提成', '学徒', '无需经验', '电销', '电催', '前台', '接待', '业务员', '总机', '护士', '后勤', '导购',
            '导医', '电话客服', '无责', '不是梦', '来挑战', '高薪', '高收入', '月薪', '年薪', '五险', '六险', '双休',
            '买社保', '福利好', '住宿', '高福利', '催收', '月均', '月入', '电话销售', '电话营销', '房产经纪人', '技工',
            '地产经纪人', '地产销售', '地产中介', '地产客服', '店员', '营业员', '置业顾问', '你敢来', '过万', '发货',
            '跳槽', '信用卡专员', '保险代理', '送餐员', '快递员', '水工', '电工', '水电工', '普工', '收款员', '仓库',
            '房产销售', '房产中介', '房产客服', '收银员', '保洁', '挣钱', '赚钱', '无学历', '食堂', '均薪', '叉车', '店长',
            '副店长', '订单员', '话务员', '餐厅', '梦想', '我们', '地推', '仓管', '值班', '自己', '维修工', '回家', '店经理',
            '跟单', '安全员', '施工', '操作工', '门店', '保安', '材料员', '小时工', '收派员', '厨师', '司机', '装配工', '资料员',
            '派件员'
        ]

        self.scrapy_company_list()

    def scrapy_company_list(self):
        """Crawl company-list pages and dispatch per-company scraping."""
        for p in range(self.start, self.end):
            url = self.base_url + str(p)
            headers['Referer'] = url
            r = requests.get(url, headers = headers, cookies = self.cookie)
            # 51job serves GBK; requests falls back to iso-8859-1, so re-encode
            # to raw bytes and decode as GBK.  (The original additionally
            # round-tripped through UTF-8, which is a no-op and was dropped.)
            page_source = r.text.encode("iso-8859-1").decode('gbk')
            soup = BeautifulSoup(page_source, 'lxml')

            self.cookie.update(r.cookies)
            self.comp_list_page = p

            for comp in soup.select('.c2-main .c2-t'):
                link = comp.find(class_ = 's1').find('a').get('href')
                name = comp.find(class_ = 's1').find('a').get('title')
                # Renamed from `type` to avoid shadowing the builtin.
                comp_type = comp.find(class_ = 's2').get_text()
                size = comp.find(class_ = 's3').get_text()
                city = comp.find(class_ = 's4').get_text()
                industry = comp.find(class_ = 's5').get_text()

                # Skip anonymised listings ("某" == "a certain company").
                if name.find('某') != -1:
                    continue

                # Skip small companies (fewer than 500 employees).
                if size in ['少于50人', '50-150人', '150-500人']:
                    continue

                item = {
                    'name': name,
                    'link': link,
                    'type': comp_type,
                    'size': size,
                    'city': city,
                    'Referer': url,
                    'industry': industry
                }

                self.scrapy_company(link, item, url)
                self.scrapy_position(item)

    def scrapy_position(self, comp):
        """Page through one company's AJAX job list and save qualifying jobs."""
        hidTotal = self.parse_hidTotal(comp)

        if not hidTotal:
            return

        # 5000-page hard ceiling; in practice we return much earlier, when a
        # page comes back with no '.el' rows.
        for pageno in range(1, round(100000 / 20)):
            headers3['Referer'] = comp['link']
            r = requests.post(comp['link'], data = {'hidTotal': hidTotal, 'type': 'undefined', 'code': 'undefined', 'pageno': pageno}, cookies = self.cookie, headers = headers3)
            soup = BeautifulSoup(r.text, 'lxml')

            if len(soup.select('.el')) == 0:
                return

            for job in soup.select('.el'):
                link = job.find(class_ = 't1').find('a').get('href')
                name = job.find(class_ = 't1').find('a').get('title').strip()
                info = job.find(class_ = 't2').text.strip()
                location = job.find(class_ = 't3').text.strip()
                salary = job.find(class_ = 't4').text.strip()
                date = job.find(class_ = 't5').text.strip()

                # Posting dates only carry "MM-DD"; assume the current year.
                # (Bug fix: the year was hard-coded to 2018, which mis-aged
                # every posting in any other year.)
                if len(date.split('-')) == 2:
                    month = int(date.split('-')[0])
                    day = int(date.split('-')[1])

                    try:
                        posted = datetime.datetime(datetime.datetime.now().year, month, day)
                    except ValueError:
                        # e.g. 02-29 in a non-leap year: treat as fresh
                        # rather than crashing the whole crawl.
                        posted = None

                    # Skip postings older than 15 days.
                    if posted is not None and (datetime.datetime.now() - posted).days >= 15:
                        continue

                # Skip postings already stored.
                if knx_all_position_coll.find_one({'name': name, 'location': location, 'company': comp['name']}):
                    continue

                # Skip postings requiring prior work experience ...
                if '年经验' in info or '以上经验' in info:
                    continue

                # ... or sub-bachelor education.
                if '大专' in info or '中专' in info or '高中' in info or '初中及以下' in info or '中技' in info:
                    continue

                salary = self.parse_salary(salary)

                # Skip low-paying postings (maximum below 6000 CNY/month).
                if len(salary) and max(salary) < 6000:
                    continue

                if self.contain_shielding_words(name):
                    continue

                self.save_position(link, comp, salary, date, location)

            self.cookie.update(r.cookies)

    def save_position(self, url, comp, salary, date, location):
        """Fetch one job-detail page and insert it into the positions collection.

        Every field is parsed best-effort: the page layout varies, so a
        missing element degrades to an empty value instead of aborting.
        (Bare ``except:`` narrowed to ``except Exception:`` so Ctrl-C and
        SystemExit still propagate.)
        """
        if url.find('jobs.51job.com') == -1:
            return

        r = requests.get(url, headers = headers)
        page_source = r.text
        soup = BeautifulSoup(page_source, 'lxml')

        try:
            name = soup.find(class_ = 'tHjob').find('h1').text.strip()
        except Exception:
            name = ''

        try:
            company = soup.find(class_ = 'cname').find('a').text.strip()
        except Exception:
            company = ''

        try:
            description = soup.find(class_ = 'job_msg').text.strip()

            # Trim the trailing "job categories" section off the description.
            if description.find('职能类别') != -1:
                description = description[:description.find('职能类别')].strip()
        except Exception:
            description = ''

        try:
            # Required years of experience, e.g. "3-5年经验" -> [3, 5].
            exp = re.findall(r'([^|\s]+)年经验', soup.select('.tHeader.tHjob .msg.ltype')[0].get_text().strip())

            if len(exp) and '-' in exp[0]:
                exp = exp[0].split('-')

            exp = np.array(exp, dtype = int).tolist()
        except Exception:
            exp = []

        try:
            edu = ''

            # First matching education level wins (ordered lowest to highest).
            for i in ['初中', '中专', '大专', '高中', '本科', '研究生', '硕士', '博士']:
                if i in soup.find(class_ = 'msg ltype').text.strip():
                    edu = i
                    break
        except Exception:
            edu = ''

        try:
            # Number of openings: keep digits only.
            count = int(re.sub(r"\D", "", soup.select('.jtag.inbox .t1 .i3')[0].parent.get_text()))
        except Exception:
            count = ''

        try:
            lang = soup.select('.jtag.inbox .t1 .i5')[0].parent.get_text()
        except Exception:
            lang = ''

        try:
            major = soup.select('.jtag.inbox .t1 .i6')[0].parent.get_text()
        except Exception:
            major = ''

        try:
            welfare = [t.text for t in soup.find(class_ = 'jtag').select('span')]
        except Exception:
            welfare = []

        try:
            funType = soup.find(text = re.compile('职能类别')).parent.find_next_sibling().text
        except Exception:
            funType = ''

        try:
            place = soup.find(text = re.compile('上班地址')).parent.parent.get_text().strip().split('：')[1]
        except Exception:
            place = ''

        try:
            industry = soup.find(class_ = 'com_tag').select('p')[2].get_text()
        except Exception:
            industry = ''

        try:
            logoUrl = 'https:' + soup.find(class_ = 'himg').find('img').get('src')
        except Exception:
            logoUrl = ''

        item = {
            'url': url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'logoUrl': logoUrl,
            'compId': '',
            'industry': industry,
            'ranking_score': -1,
            'platform': 4,                    # 4 == 51job in the aggregate DB
            'compsize': comp['size'],
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # A posting without a title or a description is useless -- drop it.
        if len(item['name']) == 0 or len(item['description']) == 0:
            return

        if not knx_all_position_coll.find_one({'name': name, 'company': company, 'location': location}):
            print(company, name, 'inserted [message from qiancheng]')
            knx_all_position_coll.insert_one(item)
        else:
            print(company, name, 'existed [message from qiancheng]')

        self.cookie.update(r.cookies)

    def scrapy_company(self, link, item, url):
        """Fetch a company-detail page and insert it into the enterprise collection.

        Fields are parsed best-effort, same policy as save_position.
        """
        if knx_all_enterprise_coll.find_one({'name': item['name']}):
            return

        if link.find('jobs.51job.com') == -1:
            return

        try:
            headers5['Referer'] = url
            r = requests.get(link, headers = headers5, timeout = 5)
        except Exception:
            return

        try:
            soup = BeautifulSoup(r.text, 'lxml')

            if not soup:
                return
        except Exception:
            return

        try:
            name = soup.find('h1').text.strip()
        except Exception:
            name = ''

        try:
            # Renamed from `type` to avoid shadowing the builtin.
            comp_type = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[0]
        except Exception:
            comp_type = ''

        try:
            size = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[2]
        except Exception:
            size = ''

        try:
            industry = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[4]
        except Exception:
            industry = ''

        try:
            description = soup.find(class_ = 'con_txt').get_text().strip()
        except Exception:
            description = ''

        try:
            location = soup.find(text = re.compile('公司地址')).parent.parent.get_text().replace('公司地址：', '').strip().replace(' ', '')
        except Exception:
            location = ''

        try:
            site = soup.find(text = re.compile('公司官网：')).parent.parent.get_text().replace('公司官网：', '').strip()
        except Exception:
            site = ''

        try:
            logoUrl = 'https:' + soup.find(class_ = 'cimg').get('src')
        except Exception:
            logoUrl = ''

        item = {
            'name': name,
            'type': comp_type,
            'site': site,
            'url': link,
            'size': size,
            'logoUrl': logoUrl,
            'industry': industry,
            'location': location,
            'platform': 4,                    # 4 == 51job in the aggregate DB
            'compId': comp_id(),
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        if not knx_all_enterprise_coll.find_one({'name': name}):
            knx_all_enterprise_coll.insert_one(item)
            print(name, 'inserted [message from qiancheng]')

        return True

    def parse_hidTotal(self, comp):
        """Return the hidden job-count token needed by the AJAX job list, or ''."""
        headers2['Referer'] = comp['Referer']
        r = requests.get(comp['link'], headers = headers2, cookies = self.cookie, timeout = 5)
        page_source = r.text
        soup = BeautifulSoup(page_source, 'lxml')

        self.cookie.update(r.cookies)

        if soup.find(id = 'hidTotal'):
            return soup.find(id = 'hidTotal').get('value')
        else:
            return ''

    def parse_salary(self, salary):
        """Parse a 51job salary string into [low, high] CNY-per-month floats.

        Handles the "N千/月", "N万/月", "N万/年" and "N元/天" families
        (including 以上/以下 "or above/below" variants).  Anything else,
        e.g. "面议" (negotiable), yields [].
        """
        if salary.find('千/月') != -1 or salary.find('千以下/月') != -1 or salary.find('千以上/月') != -1:
            salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) * 1000)
        elif salary.find('万/月') != -1 or salary.find('万以上/月') != -1 or salary.find('万以下/月') != -1:
            salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) * 10000)
        elif salary.find('万/年') != -1 or salary.find('万以上/年') != -1 or salary.find('万以下/年') != -1:
            salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) / 12 * 10000)
        elif salary.find('元/天') != -1 or salary.find('元以上/天') != -1 or salary.find('元以下/天') != -1:
            salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) * 30)
        else:
            salary = []

        return salary

    def contain_shielding_words(self, name):
        """Return True if the job title contains any blacklisted word."""
        return any(word in name for word in self.shielding_words)


def start(begin, end):
    # Worker-process entry point: scraping of company-list pages
    # [begin, end) runs entirely as a side effect of construction
    # (QIANCHENG_POST.__init__ kicks off scrapy_company_list).
    QIANCHENG_POST(begin, end)


if __name__ == '__main__':
    # Split the company-list page range across four worker processes,
    # then wait for all of them to finish.
    page_ranges = [(1, 500), (500, 2000), (2000, 2500), (2500, 3000)]
    workers = [Process(target = start, args = bounds) for bounds in page_ranges]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
