from pymongo import MongoClient
from bs4 import BeautifulSoup
from multiprocessing import Process
from multiprocessing import Pool

import pprint
import requests
import json
import re

# MongoDB connection. connect=False defers establishing the connection
# until the first operation (documented pymongo behavior), which avoids
# sharing a live connection across forked workers — likely why it is set,
# given the multiprocessing imports above. NOTE(review): hard-coded host
# IP; consider moving to config/env.
client = MongoClient('47.96.88.18', 27017, connect = False)

# Database and collection storing company info scraped from dajie.com.
cv_match_dajie_com_info_db = client['cv_match_dajie_com_info_db']
cv_match_dajie_com_info_coll = cv_match_dajie_com_info_db['cv_match_dajie_com_info_coll']


def crawl_com_info(url, max_retries=3):
    """Fetch a dajie.com company intro page and store its details in MongoDB.

    Args:
        url: Full URL of the company's intro page.
        max_retries: How many times to attempt the HTTP request before
            giving up. (Replaces the original unbounded recursive retry,
            which could overflow the stack if the host stayed unreachable.)

    Side effects:
        Inserts one document into ``cv_match_dajie_com_info_coll`` when the
        page yields a non-empty name and industry not already stored.
    """
    # Bounded retry loop instead of `return crawl_com_info(url)` recursion.
    r = None
    for _ in range(max_retries):
        try:
            r = requests.get(url, timeout = 5)
            break
        except requests.RequestException:
            print('request error!')
    if r is None:
        # All attempts failed; skip this company rather than crash.
        return

    soup = BeautifulSoup(r.text, 'lxml')

    # Each field is scraped best-effort: a missing element yields ''.
    # AttributeError is what `soup.find(...) is None` produces; the bare
    # `except:` originals also swallowed KeyboardInterrupt/SystemExit.
    try:
        name = soup.find('h1').get_text().strip()
    except AttributeError:
        name = ''

    try:
        size = soup.find(class_ = 'd-person').text
    except AttributeError:
        size = ''

    # Pages come in two layouts: a 'cor-table' grid, or flat 'd-*' fields.
    # Hoisted: the original re-ran this find three times.
    cor_table = soup.find(class_ = 'cor-table')

    try:
        if cor_table:
            # row 1, column 2 of the grid layout holds the company type
            com_type = cor_table.select('tr')[1].select('td')[2].text.strip()
        else:
            com_type = soup.find(class_ = 'd-come').text
    except (AttributeError, IndexError):
        com_type = ''

    try:
        if cor_table:
            # row 1, column 0 holds the industry
            industry = cor_table.select('tr')[1].select('td')[0].text.strip()
        else:
            industry = soup.find(class_ = 'd-type').text
    except (AttributeError, IndexError):
        industry = ''

    try:
        description = soup.find(class_ = 'cor-introduce').get_text().strip()
    except AttributeError:
        description = ''

    item = {
        'name': name,
        'size': size,
        'type': com_type,  # local renamed from `type` (shadowed builtin); stored key unchanged
        'industry': industry,
        'description': description
    }

    # Deduplicate on company name before inserting.
    if name and industry and not cv_match_dajie_com_info_coll.find_one({'name': name}):
        cv_match_dajie_com_info_coll.insert_one(item)
        print(name, 'inserted')


# Region codes used by dajie.com's corp-index URLs ('999999' appears to be
# a catch-all bucket — TODO confirm). For each region, walk up to 299
# listing pages and crawl every company not already stored.
for city_id in ['310000', '120000', '500000', '340000', '350000', '620000', '440000', '450000', '520000', '460000', '130000', '410000', '230000', '420000', '430000', '220000', '320000', '360000', '210000', '150000', '640000', '630000', '370000', '140000', '610000', '510000', '540000', '650000', '530000', '330000', '800000', '810000', '999999']:
    for page in range(1, 300):
        # Timeout + narrow exception handling: the original had neither, so
        # one network hiccup aborted the entire multi-day crawl.
        try:
            page_source = requests.get(
                'https://www.dajie.com/corp/index-pa' + str(page) + '-ci' + city_id + '-po-kw/',
                timeout = 5).text
        except requests.RequestException:
            print('listing request error!')
            continue

        soup = BeautifulSoup(page_source, 'lxml')

        for item in soup.select('.corp-list li'):
            job_name = item.find(class_ = 'job-name')
            link_tag = job_name.find('a') if job_name else None
            if link_tag is None:
                continue  # malformed list entry; skip instead of raising
            name = link_tag.text.strip()

            # Already stored — no need to fetch the detail page again.
            if cv_match_dajie_com_info_coll.find_one({'name': name}):
                continue

            # The corp id lives on the 'attention' element; guard against a
            # missing tag/attribute, which previously raised and killed the run.
            attention = job_name.find(class_ = 'attention')
            corp_id = attention.get('data-corp-id') if attention else None
            if not corp_id:
                continue

            crawl_com_info('https://www.dajie.com/corp/' + corp_id + '/index/intro')
