from pymongo import MongoClient
from bs4 import BeautifulSoup
from multiprocessing import Process
from multiprocessing import Pool

import pprint
import requests
import json
import re

# Browser-like request headers for the company listing pages
# (Host pinned to company.51job.com).
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'company.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://company.51job.com/p1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Headers for the company detail pages (Host pinned to jobs.51job.com).
# NOTE: crawl_com_info mutates this dict's 'Referer' key per request, so it
# is shared mutable state across calls within a process.
headers5 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'jobs.51job.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
}

# MongoDB connection. connect=False defers the actual connection until first
# use, which is required for fork-based multiprocessing (each child process
# gets its own connection instead of sharing the parent's socket).
client = MongoClient('47.96.88.18', 27017, connect = False)

# Database/collection that stores one document per crawled company.
cv_match_qiancheng_com_info_db = client['cv_match_qiancheng_com_info_db']
cv_match_qiancheng_com_info_coll = cv_match_qiancheng_com_info_db['cv_match_qiancheng_com_info_coll']


def crawl_com_info(url):
    """Fetch a 51job company detail page and store the company in MongoDB.

    Parameters:
        url: absolute URL of the company detail page (jobs.51job.com).

    Side effects:
        Mutates the shared ``headers5`` dict (Referer) and inserts one
        document into ``cv_match_qiancheng_com_info_coll`` when the company
        name is non-empty and not already present.
    """
    headers5['Referer'] = url

    # Bounded retry loop. The original recursed on *every* failure with a
    # bare except, which could recurse forever (and overflow the stack) if
    # the host stayed unreachable.
    r = None
    for _ in range(3):
        try:
            r = requests.get(url, headers = headers5, timeout = 5)
            break
        except requests.RequestException:
            continue
    if r is None:
        return

    soup = BeautifulSoup(r.text, 'lxml')

    def _grab(extract):
        # Best-effort extraction: any missing/misshapen markup yields ''.
        # Narrowed from bare except so real bugs (and Ctrl-C) still surface.
        try:
            return extract()
        except (AttributeError, IndexError, TypeError):
            return ''

    name = _grab(lambda: soup.find('h1').text.strip())

    # The ".ltype" line carries type/size/industry as whitespace-separated
    # tokens; parse it once instead of re-walking the tree per field.
    ltype_tokens = _grab(
        lambda: soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split())
    com_type = ltype_tokens[0] if len(ltype_tokens) > 0 else ''
    size = ltype_tokens[2] if len(ltype_tokens) > 2 else ''
    industry = ltype_tokens[4] if len(ltype_tokens) > 4 else ''

    description = _grab(lambda: soup.find(class_ = 'con_txt').get_text().strip())

    # Bug fix: the original computed size/type/industry/description and then
    # stored only the name, silently discarding the scraped data. Persist
    # all fields (renamed local avoids shadowing the builtin `type`).
    item = {
        'name': name,
        'type': com_type,
        'size': size,
        'industry': industry,
        'description': description
    }

    if name and not cv_match_qiancheng_com_info_coll.find_one({'name': name}):
        cv_match_qiancheng_com_info_coll.insert_one(item)
        print(name, 'inserted')


def start(begin, end):
    """Crawl 51job company listing pages ``begin`` .. ``end - 1``.

    For each listing page, extracts company links/names; companies already
    present in MongoDB (matched by name) are skipped, otherwise the detail
    page is crawled via ``crawl_com_info``.
    """
    for page in range(begin, end):
        url = 'https://company.51job.com/hy40/p' + str(page) + '/'
        try:
            # Timeout added so a hung connection can't stall the worker
            # forever; a failed page is skipped rather than killing the
            # whole process.
            r = requests.get(url, headers = headers, timeout = 10)
        except requests.RequestException:
            continue

        # Pages are GBK-encoded. Decode the raw bytes directly: the original
        # iso-8859-1 encode/decode round trip reconstructed r.content, and
        # its trailing .encode('utf8').decode('utf8') was a no-op.
        page_source = r.content.decode('gbk')
        soup = BeautifulSoup(page_source, 'lxml')

        for comp in soup.select('.c2-main .c2-t'):
            anchor = comp.find(class_ = 's1').find('a')
            link = anchor.get('href')
            name = anchor.get('title')

            # Cheap de-dup before the (expensive) detail-page fetch.
            if cv_match_qiancheng_com_info_coll.find_one({'name': name}):
                continue

            crawl_com_info(link)


if __name__ == '__main__':
    # Partition the ~3005 listing pages into four contiguous ranges, one
    # per worker process.
    page_ranges = [(1, 750), (750, 1500), (1500, 2200), (2200, 3006)]

    workers = [Process(target = start, args = span) for span in page_ranges]

    for worker in workers:
        worker.start()

    # Wait for every worker to finish before exiting.
    for worker in workers:
        worker.join()
