from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint
import requests
import datetime
import json
import time
import re

# Connection to the local MongoDB instance used as the job-post store.
# NOTE(review): "offical" is a pre-existing typo, but it is part of the
# on-disk database/collection names — renaming it here would orphan any
# previously-scraped data, so it is kept as-is.
client = MongoClient('127.0.0.1', 27017)
db = client['knx_posts_db']
offical_posts_coll = db['offical_posts_coll']


class POST():
    """Scrape GSK's official job postings (China, last month) into MongoDB.

    Instantiating the class immediately runs the scrape — this
    work-in-constructor design is kept for backward compatibility with the
    module-level ``POST()`` call at the bottom of the file.
    """

    # Detail links on the results page are relative to this base.
    BASE_URL = 'https://careers.peopleclick.com/careerscp/client_gsk/external1931/ZH_CN/'

    def __init__(self):
        self.scrapy()

    def scrapy(self):
        """Fetch the search-results page, scrape each posting's detail page,
        and insert postings into ``offical_posts_coll``.

        Postings are de-duplicated on (name, company, location).  A network
        or parse failure on an individual posting skips that posting rather
        than aborting the whole run.
        """
        search_url = (
            self.BASE_URL
            + 'gateway.do?functionName=searchFromLink'
              '&com.peopleclick.cp.formdata.FLD_JPM_COUNTRY=44'
              '&com.peopleclick.cp.formdata.SEARCHCRITERIA_JOBPOSTAGE=MONTH_1'
        )
        # Timeout added so a stalled server cannot hang the scraper forever.
        r = requests.get(search_url, timeout=30)
        soup = BeautifulSoup(r.text, 'lxml')

        for hit in soup.select('.pf-srp-singlehit'):
            try:
                # Look the title element up once (the original queried it twice).
                title_link = hit.find(class_='pf-srp-jobResult-title')
                name = title_link.text.strip()
                detail_url = self.BASE_URL + title_link.get('href')

                detail_html = requests.get(detail_url, timeout=30).text
                description = BeautifulSoup(detail_html, 'lxml') \
                    .find(class_='pf-rwd-jobdetails-body').text.strip()

                extra_spans = hit.find(class_='pf-rwd-additionJobpostFields1').select('span')
                location = extra_spans[0].text.strip()
                date = extra_spans[2].text.strip()

                # Trim the boilerplate "Why GSK?:" tail.  Guard against the
                # marker being absent: str.find would return -1 and the slice
                # would silently drop the description's last character.
                marker = description.find('Why GSK?:')
                if marker != -1:
                    description = description[:marker].strip()
            except (AttributeError, IndexError, requests.RequestException):
                # AttributeError: an expected element was missing (find()
                # returned None); IndexError: fewer <span>s than expected;
                # RequestException: the detail-page fetch failed.  Skip this
                # posting and continue with the rest.
                continue

            # Normalized post record (named `post`, not `item`, so the loop
            # variable is not clobbered).
            post = {
                "url": detail_url,
                'edu': '',
                'exp': [],
                'name': name,
                'date': date,
                'lang': '',
                'place': '',
                'major': '',
                'count': '',
                'salary': [],
                'toSchool': True,
                'welfare': [],
                'funType': '',
                'company': 'GSK',
                'location': location,
                'industry': '化工',
                'keywords': [],
                'platform': 'offical',
                'searchKeyword': '',
                'description': description,
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
            # NOTE(review): the original record also carried 'subIndustry': ''
            post['subIndustry'] = ''

            print(post['company'], post['name'])
            # Insert only if no posting with the same name/company/location exists.
            if not offical_posts_coll.find_one({'name': post['name'], 'company': post['company'], 'location': post['location']}):
                offical_posts_coll.insert_one(post)


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    POST()
