from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint
import requests
import datetime
import json
import time
import re

# MongoDB connection: postings are written to knx_posts_db.offical_posts_coll.
# (NOTE(review): "offical" [sic] is the established collection name used by
# readers of this data — do not rename without migrating.)
client = MongoClient('127.0.0.1', 27017)
db = client['knx_posts_db']
offical_posts_coll = db['offical_posts_coll']

# NOTE(review): despite the name, `params` is a dict of HTTP request *headers*
# (Accept, Cookie, User-Agent, ...) captured from a logged-in browser session,
# not URL query parameters. The Cookie value is account-specific and will
# expire; the hard-coded Content-Length does not match the form body actually
# sent — confirm before reuse.
params = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Length': '50',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Cookie': 'guid=15234920055498880057; _ujz=OTEzNDQ5NTIw; slife=lastlogindate%3D20180412%26%7C%26; ps=us%3DW2IBaAFmBS1SMg1gCmYDLgAxVmJXYFArU2JdcwE6UmdcZgVtVzECPQJhXzMHY1VgUWJTagc2V35SNVdvC3IFNVs1%26%7C%26needv%3D0; 51job=cuid%3D91344952%26%7C%26cusername%3Dphone_13735863577%26%7C%26cpassword%3D%26%7C%26cname%3D%25D0%25EC%2520%25B3%25AC%25CA%25A4%26%7C%26cemail%3D%26%7C%26cemailstatus%3D0%26%7C%26cnickname%3D%26%7C%26ccry%3D.05p%252FO46x1jOo%26%7C%26cconfirmkey%3D%25241%2524hj%252FGX9Ot%252462Pj8HYfbK8A2gEVNAfGn1%26%7C%26cresumeids%3D.0gPHU9iPYzLE%257C%26%7C%26cautologin%3D0%26%7C%26cenglish%3D0%26%7C%26sex%3D0%26%7C%26cnamekey%3D%25241%25240gRy8kQa%2524a0PKW%252F0xpluLzCx6AKXSd%252F%26%7C%26to%3DDTwGbAVkCjgAZQBlC2tXZlQrVyYBbwY8BW1XYF8KAjNeZQJuBGUGNV00AWsBZ11rUGQKPAR%252BUjZRYlA3AT5WZg03BmUFZwo%252BAGA%253D%26%7C%26; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60080200%7C%21ord_field%7E%600%7C%21recentSearch0%7E%601%A1%FB%A1%FA080200%2C00%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA%B0%C4%D6%DE%C1%AA%B0%EE%D2%F8%D0%D0%A1%FB%A1%FA2%A1%FB%A1%FA%A1%FB%A1%FA-1%A1%FB%A1%FA1523492420%A1%FB%A1%FA0%A1%FB%A1%FA%A1%FB%A1%FA%7C%21; partner=zhaopin_baidu_com',
    'Host': 'jobs.51job.com',
    'Origin': 'https://jobs.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'https://jobs.51job.com/all/co1644420.html?',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    'X-Requested-With': 'XMLHttpRequest'
}


class POST():
    """Crawls job postings from one 51job (前程无忧) company page and inserts
    previously-unseen ones into the ``offical_posts_coll`` MongoDB collection.

    Construction immediately starts the crawl (original design); only postings
    with no explicit experience requirement are stored.
    """

    def __init__(self):
        # Crawl as a side effect of construction, matching the original API.
        self.scrapy()

    @staticmethod
    def _parse_salary(label):
        """Convert a Chinese salary label into ``[low, high]`` monthly CNY floats.

        Examples: '6-8千/月' -> [6000.0, 8000.0]; '1-1.5万/月' -> [10000.0, 15000.0].
        '万/年' and '元/天' figures are normalised to per-month amounts
        (year / 12, day * 30). Returns [] when no figure can be parsed
        (e.g. '面议' / negotiable).

        BUG FIX: the original used ``np.array`` without ever importing numpy,
        so this computation always raised NameError into a bare ``except`` and
        salary was silently stored as []. It also parsed the hard-coded literal
        '200-400/天' instead of the actual label in the 元/天 branch.
        """
        match = re.match(r'[\d.\-]+', label)
        if not match:
            return []
        bounds = [float(part) for part in match.group().split('-')]
        # The '以上'/'以下' variants contain the same unit characters, so plain
        # substring checks cover all branches the original enumerated.
        if '千' in label and '/月' in label:
            factor = 1000.0
        elif '万' in label and '/月' in label:
            factor = 10000.0
        elif '万' in label and '/年' in label:
            factor = 10000.0 / 12
        elif '元' in label and '/天' in label:
            factor = 30.0
        else:
            return []
        return [value * factor for value in bounds]

    def _parse_detail(self, soup, url):
        """Extract one job posting from a detail-page soup into a plain dict.

        Every field is parsed best-effort: a missing element yields the field's
        neutral default ('' or []) instead of aborting the whole crawl.
        """
        def tag_row_text(icon_class):
            # Text of the row containing an icon span (.i1 .. .i6); '' if absent.
            try:
                return soup.select('.jtag.inbox .t1 .' + icon_class)[0].parent.get_text()
            except Exception:
                return ''

        try:
            name = soup.find(class_='tHjob').find('h1').text
        except Exception:
            name = ''

        try:
            company = soup.find(class_='cname').find('a').text
        except Exception:
            company = ''

        try:
            location = soup.find(class_='lname').text
        except Exception:
            location = ''

        # BUG FIX: the original re-derived this text inside the item literal,
        # outside any try, so a missing .lname crashed the entire run.
        try:
            salary_label = soup.find(class_='lname').find_next_sibling().text.strip()
        except Exception:
            salary_label = ''
        salary = self._parse_salary(salary_label)

        try:
            description = soup.find(class_='job_msg').text.strip()
            # Everything from the '职能类别' (function category) marker onwards
            # is footer boilerplate, not the job description.
            if '职能类别' in description:
                description = description[:description.find('职能类别')].strip()
        except Exception:
            description = ''

        exp = tag_row_text('i1')
        # '无' means no experience required; otherwise keep the year figures.
        exp = [] if '无' in exp else re.findall(r'\d+', exp)

        edu = tag_row_text('i2')

        try:
            # Headcount row, e.g. '招2人' -> 2.
            count = int(re.sub(r'\D', '', tag_row_text('i3')))
        except ValueError:
            count = ''

        date = tag_row_text('i4').replace('发布', '')
        lang = tag_row_text('i5')
        major = tag_row_text('i6')

        try:
            welfare = soup.select('.jtag.inbox .t2')[0].get_text().strip().split('\n')
        except Exception:
            welfare = []

        try:
            funType = soup.find(string=re.compile('职能类别')).parent.find_next_sibling().text
        except Exception:
            funType = ''

        try:
            place = soup.find(string=re.compile('上班地址')).parent.parent.get_text().strip().split('：')[1]
        except Exception:
            place = ''

        # BUG FIX: the original did `keywords += ...` on an undefined name,
        # which always raised NameError, and then stored a hard-coded [].
        keywords = []
        try:
            marker = soup.find(class_='job_msg').find(string=re.compile('关键字：'))
            keywords = sorted(set(
                marker.parent.parent.get_text().strip().replace('关键字：', '').split()))
        except Exception:
            pass

        return {
            'url': url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'toSchool': True,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'keywords': keywords,
            'platform': 'qiancheng',
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'industry': '',
            'subIndustry': '',
            'searchKeyword': '',
            'salary2': salary_label
        }

    def scrapy(self):
        """Fetch listing pages 1-4 for the company, then each job's detail
        page; parse each posting and insert it if not already in MongoDB."""
        list_url = 'https://jobs.51job.com/all/co1644420.html'
        # BUG FIX: `params` is a dict of HTTP *headers*; the original passed it
        # via `params=` (query string). The stale hard-coded Content-Length is
        # dropped so requests computes the correct one for the actual body.
        headers = {k: v for k, v in params.items() if k.lower() != 'content-length'}

        for page in range(1, 5):
            listing_resp = requests.post(
                list_url,
                data={'pageno': str(page), 'hidTotal': '61'},
                headers=headers)
            listing = BeautifulSoup(listing_resp.text, 'html.parser')

            for row in listing.select('div.el'):
                try:
                    # BUG FIX: the original stored this into `url`, clobbering
                    # the listing URL used by the outer pagination loop, so
                    # pages 2+ were POSTed to a job-detail URL.
                    detail_url = row.find(class_='t1').find('a').get('href')
                except Exception:
                    continue  # header rows in div.el carry no t1/a link

                detail_resp = requests.get(detail_url)
                # 51job pages are GBK-encoded but requests guesses iso-8859-1;
                # round-trip the raw bytes and decode as GBK (replace, so one
                # bad byte cannot kill the crawl).
                detail_html = detail_resp.text.encode('iso-8859-1').decode('gbk', errors='replace')
                soup = BeautifulSoup(detail_html, 'html.parser')

                item = self._parse_detail(soup, detail_url)

                # Only keep postings with no explicit experience requirement.
                if item['exp']:
                    continue

                # De-duplicate on (name, company, location) before inserting.
                if not offical_posts_coll.find_one({'name': item['name'],
                                                    'company': item['company'],
                                                    'location': item['location']}):
                    offical_posts_coll.insert_one(item)

                print(item['company'], item['name'])


POST()
