from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint
import requests
import datetime
import json
import time
import re

# MongoDB handles (connection is lazy -- nothing is contacted until first use).
# Database 'knx_posts_db', collection 'offical_posts_coll' (sic) stores the
# scraped official job postings.
server_client = MongoClient(host='127.0.0.1', port=27017)
server_db = server_client.get_database('knx_posts_db')
offical_posts_coll = server_db.get_collection('offical_posts_coll')

# Form payload for the hotjob.cn position-search endpoint (sent as
# application/x-www-form-urlencoded in the POST below).
# NOTE(review): most field meanings are inferred from their names -- the
# endpoint is undocumented; values were presumably captured from a browser
# session and should be verified against the live site.
data = {
    'pc.currentPage': '1',
    'pc.rowSize': '10000000',  # absurdly large page size so one request returns every posting
    'orgId': '',
    'releaseTime': '',
    'keyWord': '',
    'positionType': '',
    'trademark': '1',
    'workPlace': '',
    'useForm': '',
    'positionName': '',
    'recruitType': '1',
    'specialRecruitmentId': '',
    'brandCode': '1',
    'searchType': '1',
    'blockType': '1',
    'contentModuleType': '',
    'tagType': '',
    'comPart': '',
    'orderInt': '0',
    'workPlaceNum': '6',
    'workTypeNum': '5',
    'workCompanyNum': '6',
    'colPoitionName': 'a',
    'colPositionType': '',
    'colPositionCompany': 'c',
    'colPostionRecruit': 'd',
    'colPositionWorkPlace': '',
    'colPostionReleaseTime': 'f',
    'positionNameLength': '230',
    'positionTypeLength': '',
    'postionCompanyLength': '200',
    'positionRecruitLength': '',
    'positionWorkPlaceLength': '',
    'postionReleaseTimeLength': '',
    'positionColUseDefault': '1',
    'keyWordV': '',
    'workPlaceNameV': '',
    'comPartV': '',
    'sicCorpCodeV': '',
    'sort_order': 'by_time'  # newest postings first
}

# Request headers mimicking a desktop Chrome browser session; the Referer
# (including its 'operational' token) matches the scrape URL used in
# POST.scrapy() below.  Presumably captured from a real browser session --
# the token may expire; verify if requests start failing.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Host': 'www.hotjob.cn',
    'Origin': 'http://www.hotjob.cn',
    'Pragma': 'no-cache',
    'Referer': 'http://www.hotjob.cn/wt/Sinochem/web/index/webPosition210!getPostListByConditionShowPic?columnId=1&operational=0937dc1a7f4ad83ceb9bd2852c55271f94620959aafcacd7a439c6748874bfc30c12d219a6972fd383c01063826133a0d7fb5c79136aec8b7b524791d3c604d774ccc0c629313b44d62c8d866475fdfd17cc4d273434dca8c22b2968b285e718a492d2b9270a8164',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}


class POST():
    """Scraper for the Sinochem (中国中化集团公司) postings on hotjob.cn.

    Fetches the full search-result list with one POST, then visits each
    posting's detail page, extracts its fields, and upserts a document into
    the ``offical_posts_coll`` MongoDB collection.

    NOTE(review): instantiating the class performs the entire scrape
    (network + DB writes) as a side effect of ``__init__``; kept for
    backward compatibility with the module-level ``POST()`` call.
    """

    # Location normalization: district names are mapped to their city, and
    # the '全部地区' ("all regions") placeholder expands to the full city list.
    LOCATION_MAP = {
        '全部地区': '北京,上海,天津,重庆,石家庄,太原,呼和浩特,沈阳,大连,长春,哈尔滨,南京,杭州,合肥,福州,三明,南昌,济南,青岛,临沂,郑州,武汉,长沙,广州,南宁,海口,成都,贵阳,昆明,拉萨,西安,兰州,西宁,银川,乌鲁木齐,寻甸,泉州',
        '铁西区': '沈阳市',
        '西湖区': '杭州市',
        '涪陵区': '重庆市',
        '朝阳区': '北京市',
        '浦东新区': '上海市',
        '市南区': '青岛市'
    }

    def __init__(self):
        self.scrapy()

    def scrapy(self):
        """Fetch the listing page, then scrape and store every posting.

        Uses the module-level ``data`` payload and ``headers``.  Postings
        whose detail page cannot be fetched or parsed are skipped rather
        than aborting the whole run.
        """
        list_url = 'http://www.hotjob.cn/wt/Sinochem/web/index/webPosition210!getPostListByConditionShowPic?columnId=1&operational=0937dc1a7f4ad83ceb9bd2852c55271f94620959aafcacd7a439c6748874bfc30c12d219a6972fd383c01063826133a0d7fb5c79136aec8b7b524791d3c604d774ccc0c629313b44d62c8d866475fdfd17cc4d273434dca8c22b2968b285e718a492d2b9270a8164'
        r = requests.post(list_url, headers=headers, data=data)
        # Pin the parser so parsing does not depend on which optional
        # parsers (lxml, html5lib) happen to be installed.
        soup = BeautifulSoup(r.text, 'html.parser')

        # First <tr> is the table header; the rest are one posting each.
        for jd in soup.select('.search_result tr')[1:]:
            try:
                detail_url = 'http://www.hotjob.cn' + jd.find('a').get('href')
                detail_soup = BeautifulSoup(
                    requests.get(detail_url, timeout=5).text, 'html.parser')
            except (requests.RequestException, AttributeError, TypeError):
                # Row without a link, or the detail request failed/timed
                # out: skip this posting.
                continue

            try:
                item = self._parse_detail(detail_soup, detail_url)
            except (AttributeError, ValueError):
                # Detail page did not match the expected layout (missing
                # field, or a non-numeric head count): skip it.
                continue

            # Upsert keyed on (company, name, location) so re-runs update
            # existing documents instead of duplicating them.
            offical_posts_coll.replace_one(
                {'company': item['company'], 'name': item['name'],
                 'location': item['location']},
                item, upsert=True)
            print(item['company'], item['name'])

    def _parse_detail(self, detail_soup, url):
        """Extract one posting's fields from its detail-page soup.

        Returns the MongoDB document dict.  Raises AttributeError when a
        required element is missing, ValueError when the head count is not
        numeric -- callers are expected to skip such pages.
        """
        basic = detail_soup.find(class_='position_basic')
        name = detail_soup.find(class_='position_title').get_text().strip()

        location = basic.find(text=re.compile('工作地点：')).parent \
            .find_next_sibling().find('span').get('title')
        location = self.LOCATION_MAP.get(location, location)

        count = basic.find(text=re.compile('招聘人数：')).parent \
            .find_next_sibling().get_text()
        # '若干' means "several / unspecified"; encode it as -1.
        count = -1 if count == '若干' else int(count)

        try:
            edu = basic.find(text=re.compile('历：')).parent \
                .find_next_sibling().get_text()
        except AttributeError:
            edu = ''  # education requirement is optional on some postings

        date = basic.find(text=re.compile('发布时间：')).parent \
            .find_next_sibling().get_text()
        # The trailing ~80 characters of the content block are page
        # boilerplate -- presumably; verify against the live page layout.
        description = detail_soup.find(class_='position_content').get_text()[:-80].strip()

        return {
            "url": url,
            'edu': edu,
            'exp': [],
            'name': name,
            'date': date,
            'lang': '',
            'place': '',
            'major': '',
            'count': count,
            'salary': [],
            'toSchool': True,
            'welfare': [],
            'funType': '',
            'company': '中国中化集团公司',
            'location': location,
            'industry': '化工',
            'keywords': [],
            'platform': 'offical',
            'searchKeyword': '',
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }


if __name__ == '__main__':
    # Run the scrape only when executed as a script -- importing this
    # module must not trigger network requests and DB writes.
    POST()
