from pymongo import MongoClient
from selenium import webdriver
from bs4 import BeautifulSoup
from pprint import pprint

import requests
import datetime
import json
import time
import re

# Headless Chrome instance shared by the scraper below.
chrome_options = webdriver.ChromeOptions()
for flag in ('--headless', '--disable-gpu'):
    chrome_options.add_argument(flag)
EXECUTE_PATH = '/Users/xuchaosheng/Workspace/KNX/libs/chromedriver'
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=EXECUTE_PATH)

# Local MongoDB connection; scraped postings are upserted into this collection.
server_client = MongoClient('localhost', 27017)
server_db = server_client['knx_posts_db']
offical_posts_coll = server_db['offical_posts_coll']


class POST():
    """Scrape job postings from Didi's campus-recruiting site and upsert
    them into the module-level ``offical_posts_coll`` MongoDB collection.

    Instantiating the class runs the scrape immediately (side-effecting
    constructor, kept for backward compatibility with the existing call
    site at the bottom of the file).
    """

    def __init__(self):
        self.scrapy()

    def scrapy(self):
        """Walk position-list pages 1-5, click each posting tab, parse the
        rendered HTML, and upsert one document per posting keyed on
        (company, name, location).
        """
        base_url = 'http://campus.didichuxing.com/campus/positions/'
        company = '北京嘀嘀无限科技发展有限公司'

        # NOTE: the original code also loaded page 1 up front with a 5 s
        # sleep; that was redundant because the loop below starts at page 1.
        for page in range(1, 6):
            url = base_url + str(page)
            driver.get(url)
            time.sleep(3)  # allow the JS-rendered position list to load

            # Re-query the tab list on each iteration: clicking mutates the
            # DOM, so elements fetched earlier may go stale.
            tab_count = len(driver.find_elements_by_class_name('campus-positions-content-tab-wrapper'))
            for i in range(tab_count):
                ele = driver.find_elements_by_class_name('campus-positions-content-tab-wrapper')[i]
                # Read the tab label before clicking; avoids a second full
                # element-list query just to fetch the same element's text.
                name = ele.text
                ele.click()
                time.sleep(1)  # wait for the detail pane to render

                # Explicit parser avoids bs4's "no parser specified" warning
                # and keeps parsing deterministic across environments.
                soup = BeautifulSoup(driver.page_source, 'html.parser')

                location = soup.find(text=re.compile('工作地点：')).parent.get_text().split('：')[1]
                description0 = soup.find(text=re.compile('职位描述')).parent.parent.parent.get_text()
                description1 = soup.find(text=re.compile('任职要求')).parent.parent.parent.get_text()

                # This section is optional on the page; when absent,
                # soup.find() returns None and `.parent` raises
                # AttributeError — catch only that, not everything.
                try:
                    description2 = soup.find(text=re.compile('职位小方向')).parent.parent.get_text()
                except AttributeError:
                    description2 = ''

                item = {
                    "url": url,
                    'edu': '',
                    'exp': [],
                    'name': name,
                    'date': '',
                    'lang': '',
                    'place': '',
                    'major': '',
                    'count': -1,
                    'salary': [],
                    'toSchool': True,
                    'welfare': [],
                    'funType': '',
                    'company': company,
                    'location': location,
                    'industry': '互联网',
                    'keywords': [],
                    'platform': 'offical',
                    'searchKeyword': '',
                    'description': description0 + '\n' + description1 + '\n' + description2,
                    'subIndustry': '',
                    'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }

                # Upsert so re-runs update an existing posting instead of
                # inserting a duplicate.
                result = offical_posts_coll.replace_one(
                    {'company': company, 'name': name, 'location': location},
                    item,
                    upsert=True,
                )

                pprint(item)
                if result.matched_count:
                    print('-' * 40, 'update one job', '-' * 40)
                else:
                    print('-' * 40, 'insert one job', '-' * 40)

                time.sleep(1)


POST()
