from pymongo import MongoClient
from selenium import webdriver
from bs4 import BeautifulSoup
from pprint import pprint

import requests
import datetime
import json
import time
import re

# MongoDB connection used to persist scraped postings.
server_client = MongoClient(host='localhost', port=27017)

# Database and collection for "official-site" postings.
# (Name keeps the original 'offical' spelling — it matches the stored
# collection name and the 'platform': 'offical' field on documents.)
server_db = server_client['knx_posts_db']
offical_posts_coll = server_db['offical_posts_coll']

# HTTP headers for the paginated job-list POST request.  Host/Origin/Referer
# pin the request to AstraZeneca's 51job career site.
# NOTE(review): a stale, hard-coded commented-out 'Cookie' header was removed
# here — its value had been split across two source lines and only the first
# carried a '#', so the dangling second line was a SyntaxError inside this
# dict literal.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Host': 'astrazeneca.51job.com',
    'Origin': 'http://astrazeneca.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'http://astrazeneca.51job.com/',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}

# HTTP headers for the per-posting detail-page GET requests.  Same browser
# fingerprint as `headers`, but with a Referer pointing at the list page and
# no Content-Type/Origin (these are plain GETs, not form POSTs).
detail_headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'astrazeneca.51job.com',
    'Pragma': 'no-cache',
    'Referer': 'http://astrazeneca.51job.com/index.php?begin=begin',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}


class POST():
    """Scraper for AstraZeneca job postings hosted on 51job.

    Crawls list pages 1..29, follows each posting's detail page, and
    upserts one document per (company, name, location) into the
    ``offical_posts_coll`` MongoDB collection.  The crawl runs as a side
    effect of construction: ``POST()`` does everything.
    """

    def __init__(self):
        # Kick off the whole crawl immediately (original design).
        self.scrapy()

    def chrome_driver_scrapy(self):
        # Placeholder for a Selenium/Chrome-based scraper; never implemented.
        pass

    @staticmethod
    def _labeled_text(detail_soup, label):
        """Return the stripped text of the cell following the one whose text
        matches *label*, or '' when the label (or sibling) is absent.

        The original chained ``.parent.find_next_sibling().text`` directly on
        ``find(...)``, which raised AttributeError on any detail page missing
        the label and aborted the entire crawl.
        """
        node = detail_soup.find(text=re.compile(label))
        if node is None:
            return ''
        sibling = node.parent.find_next_sibling()
        return sibling.text.strip() if sibling is not None else ''

    def scrapy(self):
        """Fetch every list page, then every detail page, and upsert items."""
        for page in range(1, 30):
            try:
                list_resp = requests.post(
                    'http://astrazeneca.51job.com/index.php',
                    headers=headers,
                    data={
                        'pages': '29',
                        'keyword': '',
                        'keywordtype': '2',
                        'jobarea': '',
                        'codivid': '',
                        'company': '',
                        'funtype': '',
                        'issuedate': '',
                        'division': '',
                        'poscode': '',
                        'page': str(page),
                    },
                    params={'begin': 'begin'},
                    timeout=10,
                )
                # The site serves GBK-encoded Chinese pages.
                list_resp.encoding = 'gbk'
                print(page)
            except requests.RequestException:
                # Narrowed from a bare `except:` so only network/HTTP errors
                # skip the page (a bare except also swallowed KeyboardInterrupt).
                print('can not scrapy for page' + str(page))
                continue

            # Explicit parser: the bare BeautifulSoup(r.text) call picked
            # whichever parser was installed, making results nondeterministic.
            soup = BeautifulSoup(list_resp.text, 'html.parser')

            for jd in soup.select('.unnamed2 table tr'):
                cells = jd.select('td')
                # Skip header/filler rows that don't carry all five data cells
                # (the original indexed cells[4] unguarded → IndexError).
                if len(cells) < 5:
                    continue

                name = cells[0].get_text().strip()
                if name == '':
                    continue

                location = cells[1].text.strip()
                count = cells[3].text.strip()
                date = cells[4].text.strip()

                link = cells[0].find('a')
                if link is None:
                    continue
                url = link.get('href')

                try:
                    # Separate variable; the original reused `r` and clobbered
                    # the list-page response.
                    detail_resp = requests.get(url, headers=detail_headers, timeout=5)
                    detail_resp.encoding = 'gbk'
                    detail_soup = BeautifulSoup(detail_resp.text, 'html.parser')
                except requests.RequestException:
                    print('can not scrapy detail info')
                    continue

                edu = self._labeled_text(detail_soup, '学　　历：')
                lang = self._labeled_text(detail_soup, '语言要求：')

                # Description lives three levels up from its label, in the
                # following sibling block; fall back to '' if the page layout
                # differs instead of crashing.
                desc_label = detail_soup.find(text=re.compile('职位描述'))
                description = ''
                if desc_label is not None:
                    desc_node = desc_label.parent.parent.parent.find_next_sibling()
                    if desc_node is not None:
                        description = desc_node.get_text().strip()

                item = {
                    "url": url,
                    'edu': edu,
                    'exp': [],
                    'name': name,
                    'date': date,
                    'lang': lang,
                    'place': '',
                    'major': '',
                    'count': count,
                    'salary': [],
                    'toSchool': True,
                    'welfare': [],
                    'funType': '',
                    'company': '阿斯利康投资（中国）有限公司',
                    'location': location,
                    'industry': '医药',
                    'keywords': [],
                    'platform': 'offical',
                    'searchKeyword': '',
                    'description': description,
                    'subIndustry': '',
                    'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                }

                # Upsert keyed on (company, name, location): replaces an
                # existing posting or inserts a new one.
                result = offical_posts_coll.replace_one({'company': '阿斯利康投资（中国）有限公司', 'name': name, 'location': location}, item, True)

                if result.matched_count:
                    pprint(item)
                    print('-' * 40, 'update one job', '-' * 40)
                else:
                    pprint(item)
                    print('-' * 40, 'insert one job', '-' * 40)


POST()
