from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint

import requests
import datetime
import json
import time
import re

# MongoDB connection for the scraper's local database.
server_client = MongoClient('localhost', 27017)

# Target database and collection for scraped job postings.
# NOTE(review): "offical" is a typo for "official", but the name is kept as-is
# because both the stored collection name and the code below already use it.
server_db = server_client['knx_posts_db']
offical_posts_coll = server_db['offical_posts_coll']

# Form payload for the hotjob.cn job-search POST request.  Field names are
# taken verbatim from the site's API, including its own misspellings
# ('colPoitionName', 'colPostionRecruit', 'postionCompanyLength', ...), so do
# not "fix" them.  Key order is preserved as sent on the wire.
data = {
    'pc.currentPage': 1,    # 1-based page index
    'pc.rowSize': 100,      # results per page
    'orgId': '',
    'releaseTime': '',
    'keyWord': '',
    'positionType': '',
    'trademark': 1,
    'workPlace': '',
    'useForm': '',
    'positionName': '',
    'recruitType': 1,
    'specialRecruitmentId': '',
    'brandCode': 1,
    'searchType': 1,
    'blockType': 1,
    'contentModuleType': '',
    'tagType': '',
    'comPart': '',
    'orderInt': 0,
    'workPlaceNum': 5,
    'workTypeNum': 5,
    'workCompanyNum': 5,
    # Column-layout fields below: values appear arbitrary placeholders
    # ('a'..'f'); presumably only their presence matters — TODO confirm.
    'colPoitionName': 'a',
    'colPositionType': 'b',
    'colPositionCompany': 'c',
    'colPostionRecruit': '',
    'colPositionWorkPlace': 'e',
    'colPostionReleaseTime': 'f',
    'positionNameLength': 200,
    'positionTypeLength': '',
    'postionCompanyLength': '',
    'positionRecruitLength': '',
    'positionWorkPlaceLength': '',
    'postionReleaseTimeLength': '',
    'positionColUseDefault': 1,
    'positionNameV': '',
    'workPlaceNameV': '',
    'positionTypeV': '',
    'sort_order': 'by_time'  # newest postings first
}


class POST():
    """Scrape job postings from the Joyoung careers page on hotjob.cn and
    upsert them into the module-level ``offical_posts_coll`` collection.

    Instantiating the class runs the full scrape immediately (script-style
    usage; see ``POST()`` at the bottom of the file).
    """

    def __init__(self):
        # Kick off the scrape on construction — the class is used as a
        # one-shot script entry point, not as a reusable object.
        self.scrapy()

    def scrapy(self):
        """Fetch the search-result listing, then each posting's detail page,
        and upsert one document per posting keyed on (company, name, location).

        Side effects: network requests, MongoDB writes, console output,
        and a 1-second sleep per posting.
        """
        url = 'http://www.hotjob.cn/wt/joyoung/web/index/webPosition210!getPostListByConditionShowPic'
        params = {
            'columnId': 1,
            # NOTE(review): opaque site token; presumably session/anti-scrape
            # related — confirm it does not expire.
            'operational': '57b0e6fec904c9e483ad40bf30e65ed4df9d9fcb33f5f6e77844ff4b5e514c16f04b75e32e136614584b2798ad77fbca901f1e0bd72328dd06f9852c3728904c707e8e39e66f912794cffdaf206fd854450768aa9349d15939db3469f46fa3c8e82ac1bb1b02b69d'
        }
        # `data` is the module-level form payload (page size 100, by_time).
        # timeout added so a stalled server cannot hang the scraper forever.
        r = requests.post(url, params=params, data=data, timeout=30)
        # Explicit parser: relying on bs4's auto-detection emits a warning and
        # can pick different parsers (and parse trees) on different machines.
        soup = BeautifulSoup(r.text, 'html.parser')

        # First <tr> is the table header row — skip it.
        for jd in soup.select('.search_result tr')[1:]:
            cells = jd.select('td')          # hoisted: one DOM query per row
            link = cells[0].find('a')
            name = link.get_text().strip()
            company = cells[2].get_text()
            location = cells[3].get_text().strip()
            date = cells[4].get_text()

            # Renamed from `url` to avoid shadowing the listing URL above.
            detail_url = 'http://www.hotjob.cn' + link.get('href')
            detail_soup = BeautifulSoup(
                requests.get(detail_url, timeout=30).text, 'html.parser')

            count = detail_soup.find(text=re.compile('招聘人数：')).parent.get_text().split('：')[1]
            # NOTE(review): the last 80 chars are sliced off — presumably
            # fixed boilerplate at the end of the description; confirm.
            description = detail_soup.find(class_='position_content').get_text()[:-80].strip()
            try:
                edu = detail_soup.find(text=re.compile('历：')).parent.get_text().split('：')[1]
            except AttributeError:
                # Label absent on some postings: find() returns None, so
                # `.parent` raises AttributeError.  Was a bare `except:`,
                # which also hid real bugs and KeyboardInterrupt.
                edu = ''

            item = {
                "url": detail_url,
                'edu': edu,
                'exp': [],
                'name': name,
                'date': date,
                'lang': '',
                'place': '',
                'major': '',
                'count': count,
                'salary': [],
                'toSchool': True,
                'welfare': [],
                'funType': '',
                'company': company,
                'location': location,
                'industry': '制造',
                'keywords': [],
                'platform': 'offical',
                'searchKeyword': '',
                'description': description,
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

            # Upsert (third arg True): replace an existing posting with the
            # same (company, name, location), otherwise insert a new one.
            result = offical_posts_coll.replace_one(
                {'company': company, 'name': name, 'location': location}, item, True)

            pprint(item)
            if result.matched_count:
                print('-' * 40, 'update one job', '-' * 40)
            else:
                print('-' * 40, 'insert one job', '-' * 40)

            time.sleep(1)  # be polite to the server between detail fetches

POST()
