from pymongo import MongoClient
from bs4 import BeautifulSoup
from pprint import pprint

import requests
import datetime
import json
import time
import re

# Local MongoDB instance that receives the scraped job postings.
server_client = MongoClient('localhost', 27017)

server_db = server_client['knx_posts_db']
# NOTE(review): "offical" is a misspelling of "official", but it is the
# actual collection name used in the database — do not rename without
# migrating existing data.
offical_posts_coll = server_db['offical_posts_coll']


class POST():
    """Scraper for CITIC Securities (中信证券) job postings.

    Fetches the job-listing index page, follows each posting's detail
    page, and upserts one document per job into ``offical_posts_coll``,
    keyed on (company, name, location).  Instantiating the class runs
    the scrape immediately.
    """

    def __init__(self):
        self.scrapy()

    def scrapy(self):
        """Run one full scrape pass: index page -> detail pages -> MongoDB.

        Raises:
            requests.HTTPError: if the index or a detail page returns a
                non-2xx status (via ``raise_for_status``).
        """
        url = 'http://customer.citics.com/login/positionAction.do'
        params = {
            'method': 'listPositionByPageFromIndex',
            'type': '08',
            'url': '/login/positionAction',
            'extendUrl': 'type=08;url=/login/positionAction'
        }
        # Timeout keeps the scraper from hanging forever on a dead server;
        # raise_for_status stops us from parsing an HTML error page.
        r = requests.post(url, params=params, timeout=30)
        r.raise_for_status()
        # Explicit parser: without it BeautifulSoup warns and may parse
        # differently depending on which parser library is installed.
        soup = BeautifulSoup(r.text, 'html.parser')

        company = '中信证券股份有限公司'

        # First <tr> of the listing table is the header row -> skip it.
        for jd in soup.select('.joblongDate tr')[1:]:
            cells = jd.select('td')
            name = cells[0].get_text().strip()

            # Use a distinct name so we don't shadow the index-page `url`.
            detail_url = 'http://customer.citics.com' + cells[2].find('a').get('href')
            detail_resp = requests.get(detail_url, timeout=30)
            detail_resp.raise_for_status()
            detail_soup = BeautifulSoup(detail_resp.text, 'html.parser')

            try:
                # Each labelled cell's value lives in the following sibling
                # cell; `string=` is the modern spelling of the deprecated
                # `text=` keyword.
                location = detail_soup.find(string=re.compile(r'工作地点：')).parent.parent.find_next_sibling().get_text()
                description0 = detail_soup.find(string=re.compile(r'岗位职责说明')).parent.parent.find_next_sibling().get_text()
                description1 = detail_soup.find(string=re.compile(r'任职资格要求')).parent.parent.find_next_sibling().get_text()
            except AttributeError:
                # A label was missing or the page layout changed; skip this
                # posting instead of crashing the whole run.
                print('-' * 40, 'skip malformed detail page:', detail_url, '-' * 40)
                continue

            item = {
                "url": detail_url,
                'edu': '',
                'exp': [],
                'name': name,
                'date': '',
                'lang': '',
                'place': '',
                'major': '',
                'count': '',
                'salary': [],
                'toSchool': True,
                'welfare': [],
                'funType': '',
                'company': company,
                'location': location,
                'industry': '证券',
                'keywords': [],
                'platform': 'offical',
                'searchKeyword': '',
                'description': description0 + '\n' + description1,
                'subIndustry': '',
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

            # Upsert keyed on (company, name, location); keyword argument
            # instead of a bare positional `True` for readability.
            result = offical_posts_coll.replace_one(
                {'company': company, 'name': name, 'location': location},
                item,
                upsert=True,
            )

            if result.matched_count:
                pprint(item)
                print('-' * 40, 'update one job', '-' * 40)
            else:
                pprint(item)
                print('-' * 40, 'insert one job', '-' * 40)


POST()
