from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint
import requests
import datetime
import json
import time
import re

# MongoDB connection (lazy — pymongo connects on first operation).
server_client = MongoClient(host='localhost', port=27017)

# Database and collection holding scraped official job postings.
# pymongo resolves attribute access the same way as subscript access.
server_db = server_client.knx_posts_db
offical_posts_coll = server_db.offical_posts_coll

# XHR-style POST headers for www.ncss.org.cn.
# NOTE(review): neither `headers` nor `data` is referenced by POST.scrapy(),
# which scrapes job.byd.com.cn — this looks like leftover configuration from
# an earlier/other scraper; confirm before removing.
headers = {
    'Accept': 'text/html, */*; q=0.01',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'Host': 'www.ncss.org.cn',
    'Origin': 'http://www.ncss.org.cn',
    'Pragma': 'no-cache',
    'Referer': 'http://www.ncss.org.cn/cnooc/job',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}

# Browser-style (non-XHR) headers for www.ncss.org.cn page requests.
# NOTE(review): unused by POST.scrapy() — appears to be leftover from an
# ncss.org.cn scraper; confirm before removing.
headers2 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'www.ncss.org.cn',
    'Pragma': 'no-cache',
    'Referer': 'http://www.ncss.org.cn/cnooc/job',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}

# Form payload for the ncss.org.cn job-list endpoint (pagination / filters).
# NOTE(review): unused by POST.scrapy() — presumably belongs with `headers`
# above as leftover ncss.org.cn configuration; confirm before removing.
data = {
    'recId': 0,
    'major': '',
    'degree': '',
    'daylimit': -1,
    'endDate': '',
    'pageSize': 100,
    'pageIndex': 1
}


class POST():
    """Scrape BYD campus job postings and upsert them into MongoDB.

    Instantiating the class runs the scrape immediately (side effect in
    the constructor — kept for backward compatibility with the existing
    module-level ``POST()`` call).
    """

    def __init__(self):
        self.scrapy()

    @staticmethod
    def _field(jd, label):
        """Return the stripped text of the cell following the cell whose
        text matches *label*, or '' when the label/sibling is missing.

        Guards against ``find`` returning None, which previously raised
        AttributeError and aborted the whole scrape on one malformed row.
        Uses bs4's ``string=`` keyword (the modern name for the
        deprecated ``text=`` argument).
        """
        marker = jd.find(string=re.compile(label))
        if marker is None:
            return ''
        sibling = marker.parent.find_next_sibling()
        return sibling.text.strip() if sibling is not None else ''

    def scrapy(self):
        """Fetch the BYD campus jobs page and upsert each posting.

        Each posting is keyed on (company, name, location); existing
        documents are replaced, new ones inserted (upsert).
        """
        url = 'http://job.byd.com.cn/campus/Jobs.jsp'
        # Timeout so a dead server cannot hang the scraper forever.
        r = requests.post(url, timeout=30)
        soup = BeautifulSoup(r.text, 'lxml')

        for jd in soup.select('.pos_detail'):
            name = self._field(jd, '职位类别')
            location = self._field(jd, '工作地点')
            # NOTE(review): the label 学历要求 means "education requirement",
            # yet the value is stored under 'major' while 'edu' stays empty —
            # looks swapped; behavior kept as-is, confirm intent.
            major = self._field(jd, '学历要求')
            description0 = self._field(jd, '任职要求')
            description1 = self._field(jd, '职责描述')

            company = '比亚迪股份有限公司'

            item = {
                "url": url,
                'edu': '',
                'exp': [],
                'name': name,
                'date': '',
                'lang': '',
                'place': '',
                'major': major,
                'count': '',
                'salary': [],
                'toSchool': True,
                'welfare': [],
                'funType': '',
                'company': company,
                'location': location,
                'industry': '化工',
                'keywords': [],
                'platform': 'offical',
                'searchKeyword': '',
                'description': description0 + '\n' + description1,
                'subIndustry': '',
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

            # Upsert: replace the existing document for this job or insert a
            # new one. `upsert=True` spelled as a keyword for readability
            # (was a bare positional `True`).
            result = offical_posts_coll.replace_one(
                {'company': company, 'name': name, 'location': location},
                item,
                upsert=True,
            )

            pprint(item)
            if result.matched_count:
                print('-' * 40, 'update one job', '-' * 40)
            else:
                print('-' * 40, 'insert one job', '-' * 40)


# Run the scrape only when executed as a script — importing this module
# no longer triggers a network scrape and database writes as a side effect.
if __name__ == '__main__':
    POST()
