from pymongo import MongoClient
from selenium import webdriver
from bs4 import BeautifulSoup
from pprint import pprint

import pprint
import requests
import datetime
import json
import time
import re

# Local MongoDB instance used to store and dedupe the scraped job posts.
server_client = MongoClient('127.0.0.1', 27017)

# NOTE(review): 'offical' is a typo for 'official', but it is the actual
# collection name in the database — renaming it would orphan existing data.
server_db = server_client['knx_posts_db']
offical_posts_coll = server_db['offical_posts_coll']

# POST form payload for the hotjob.cn position-search endpoint. The field
# names/values presumably mirror the site's search form as captured from a
# browser session — TODO confirm against the live form. Only
# 'pc.currentPage' is modified at runtime (rewritten in the paging loop).
data = {
    'pc.currentPage': 1,  # 1-based page index; overwritten each iteration
    'pc.rowSize': 10,  # results per page
    'orgId': '',
    'releaseTime': 0,
    'keyWord': '',
    'positionType': '',
    'trademark': 1,
    'workPlace': '',
    'useForm': 0,
    'positionName': '',
    'recruitType': 1,
    'specialRecruitmentId': '',
    'brandCode': 1,
    'searchType': 1,
    'blockType': '',
    'contentModuleType': '',
    'tagType': '',
    'comPart': '',
    'orderInt': 0,
    'workPlaceNum': 5,
    'workTypeNum': 5,
    'workCompanyNum': 5,
    'colPoitionName': 'a',
    'colPositionType': 'b',
    'colPositionCompany': '',
    'colPostionRecruit': '',
    'colPositionWorkPlace': 'e',
    'colPostionReleaseTime': 'f',
    'positionNameLength': '',
    'positionTypeLength': '',
    'postionCompanyLength': '',
    'positionRecruitLength': '',
    'positionWorkPlaceLength': '',
    'postionReleaseTimeLength': '',
    'positionColUseDefault': 1,
    'keyWordV': '',
    'workPlaceNameV': '',
    'positionTypeV': '',
    'comPartV': '',
    'sicCorpCodeV': '',
    'sort_order': 'by_time'  # newest postings first
}

# Query-string parameters for the search endpoint. 'operational' is an
# opaque server-issued token, presumably captured from a browser session —
# NOTE(review): it may expire and need periodic refreshing; confirm.
params = {
    'columnId': '1',
    'operational': 'b855137b9967a8aa0f078b1142c3f685067f953b2d859369dfb5605a60174471c6685f42eb9acbbeb0b5ff478edaed72194f9e94a667b2b26a64cc76e7075143960b09e42eb3f36f5c7ecc21a706db560121c4bfeaf1c5534ac7c073fd9c579d4c6cda19e3dc9ccd'
}

# How long (seconds) to wait on each HTTP call before giving up, instead of
# hanging the whole scrape on one slow response.
REQUEST_TIMEOUT = 15


def _sibling_text(detail_soup, label_pattern):
    """Return the text of the element next to the label matching
    *label_pattern* on a job-detail page, or '' when the label is absent.

    find() returns None when no node matches, so the chained attribute
    access raises AttributeError — the only failure mode we expect here.
    """
    try:
        return detail_soup.find(text=re.compile(label_pattern)).parent.find_next_sibling().get_text()
    except AttributeError:
        return ''


# Walk result pages 1-9 of Didi's hotjob.cn job board, fetch each posting's
# detail page, and insert previously-unseen postings into MongoDB.
for i in range(1, 10):
    data['pc.currentPage'] = i
    r = requests.post(
        'http://www.hotjob.cn/wt/didichuxing/web/index/webPosition210!getPostListByConditionShowPic',
        params=params, data=data, timeout=REQUEST_TIMEOUT)
    soup = BeautifulSoup(r.text, 'lxml')

    # The first <tr> of the result table is the header row — skip it.
    for ele in soup.find(class_='search_result').select('tr')[1:]:
        href = 'http://www.hotjob.cn/' + ele.find('a').get('href')
        s = BeautifulSoup(requests.get(href, timeout=REQUEST_TIMEOUT).text, 'lxml')

        name = ele.find('a').get_text().strip()

        # The work location lives in a span's title attribute, not its text,
        # so it needs its own extraction path rather than _sibling_text().
        try:
            location = s.find(text=re.compile('工作地点')).parent.find_next_sibling().find('span').get('title')
        except AttributeError:
            location = ''

        edu = _sibling_text(s, '学.+历')
        date = _sibling_text(s, '发布时间：')

        # Everything before the '分享到' (share-to) widget is the description.
        # str.partition keeps the full text when the marker is missing —
        # unlike slicing with str.find(), whose -1 sentinel would silently
        # drop the last character. Also computes get_text() only once.
        full_text = s.find(class_='position_content').get_text().strip()
        description = full_text.partition('分享到')[0].strip()
        company = '北京嘀嘀无限科技发展有限公司'

        # Normalized post record; many fields are placeholders kept for
        # schema compatibility with other scrapers feeding the same
        # collection — presumably; confirm against the sibling scrapers.
        item = {
            "url": '',
            'edu': edu,
            'exp': [],
            'name': name,
            'date': date,
            'lang': '',
            'place': '',
            'major': '',
            'count': -1,
            'salary': [],
            'toSchool': True,
            'welfare': [],
            'funType': '',
            'company': company,
            'location': location,
            'industry': '互联网',
            'keywords': [],
            'platform': 'offical',
            'searchKeyword': '',
            'description': description,
            'subIndustry': '',
            # Scrape timestamp (local time, naive).
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # Dedupe on (company, name, location) before inserting.
        if not offical_posts_coll.find_one({'company': company, 'name': name, 'location': location}):
            offical_posts_coll.insert_one(item)
            print(company, name)
