from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint
import requests
import datetime
import json
import time
import re

# MongoDB connection for storing scraped postings.
# NOTE(review): hard-coded public IP with no authentication — consider
# moving host/port to configuration or environment variables.
client = MongoClient('47.104.130.19', 27017)
db = client['knx_posts_db']
# Collection of official-site job postings. (The "offical" spelling is kept
# as-is because it matches the existing collection name in the database.)
offical_posts_coll = db['offical_posts_coll']


class POST():
    """Scrape Roche China job postings and upsert them into MongoDB.

    Instantiating the class immediately runs the scrape; this side effect
    in ``__init__`` is kept for backward compatibility with the existing
    ``POST()`` entry point.
    """

    def __init__(self):
        self.scrapy()

    def scrapy(self):
        """Fetch the job list from Roche's JSON search API, scrape each
        posting's detail page, and upsert one document per job.

        Jobs that fail to scrape (network error, missing HTML elements,
        unexpected JSON shape) are logged and skipped so one bad posting
        does not abort the whole run.
        """
        url = 'https://www.roche.com/zh/toolbox/jobSearch.json'
        r = requests.get(url, params={
            'type': 'json',
            'api': 'jobs',
            'pageLength': 1500,
            'locale': 'en',
            'keywords': 'China',
            'orderByType': 'desc',
            'orderBy': 'openDate',
            'offset': 0
        })
        # Fail fast if the listing request itself failed — there is
        # nothing useful to iterate over.
        r.raise_for_status()

        n = 0
        for item in r.json()['jobs']['items']:
            try:
                doc = self._build_doc(item)

                # Upsert keyed on (company, name, location) so re-runs
                # refresh existing postings instead of duplicating them.
                result = offical_posts_coll.replace_one({
                    'company': '罗氏',
                    'name': doc['name'],
                    'location': doc['location']
                }, doc, True)

                # replace_one returns an UpdateResult (always truthy);
                # the original `if inserted_id:` check therefore always
                # fired. upserted_id is non-None only on a fresh insert.
                if result.upserted_id is not None:
                    print('insert one', n)

                n += 1
            except Exception as e:
                # Log and skip the bad posting instead of silently
                # swallowing every error with a bare except.
                print('skip job:', e)
                continue

    def _build_doc(self, item):
        """Build the MongoDB document for one job list entry.

        Scrapes the job's detail page for the full description. Raises
        (requests.HTTPError, KeyError, AttributeError) on malformed
        input; the caller handles and skips the posting.
        """
        detail_url = 'https://www.roche.com' + item['detailsUrl']

        primary = item['primaryLocation']
        if 'city' in primary:
            location = primary['city']
        elif 'state' in primary:
            location = primary['state']
        else:
            location = ''

        page = requests.get(detail_url)
        page.raise_for_status()
        soup = BeautifulSoup(page.text, 'lxml')
        # Description = summary blurb + first bullet of the expanded
        # detail list, matching the original page layout.
        summary = soup.find(class_='expandable-text').get_text().strip()
        first_bullet = soup.find(class_='open-all-elements').select('li')[0].get_text().strip()

        return {
            "url": detail_url,
            'edu': '',
            'exp': [],
            'name': item['title'],
            'date': item['openDate'],
            'lang': '',
            'place': '',
            'major': '',
            'count': '',
            'salary': [],
            'toSchool': True,
            'welfare': [],
            'funType': '',
            'company': '罗氏',
            'location': location,
            'industry': '医疗',
            'keywords': [],
            'platform': 'offical',
            'searchKeyword': '',
            'description': summary + '\n' + first_bullet,
            'subIndustry': '',
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }


POST()
