# coding=utf-8

from pymongo import MongoClient
from bs4 import BeautifulSoup
import requests
import datetime
import random
import pprint
import json
import time
import re

# MongoDB connection to a local instance (pymongo connects lazily, so no
# network I/O actually happens until the first query).
server_client = MongoClient('127.0.0.1', 27017)

# Database / collection holding posts scraped from official company sites.
# NOTE(review): "offical" is a typo for "official", but it is the live
# collection name in the database — renaming it would orphan existing data.
server_db = server_client['knx_posts_db']
offical_posts_coll = server_db['offical_posts_coll']


class POST():
    """Scraper for job postings on the ChinaSoft recruiting site.

    Instantiating the class immediately logs in (``login``) and runs the
    full scrape (``scrapy``), inserting each new posting into the
    ``offical_posts_coll`` MongoDB collection.
    """

    def __init__(self):
        # Company display name stored verbatim on every scraped item.
        self.company = '北京中软国际教育科技股份有限公司'
        # jqGrid-style endpoint returning one page of postings as JSON.
        self.url = "http://zhaopin.chinasoftinc.com//applicant/loadTable"
        # Session cookies captured by login(); sent with every data request.
        self.cookie = dict()

        self.params = {
            'random': '0.22912874985639053'
        }

        # jqGrid paging payload; 'nd' is a cache-buster refreshed per scrape.
        self.data = {
            '_search': False,
            'nd': 1536560922276,
            'rows': 25,
            'page': 1,
            'sidx': 'publish_time',
            'sord': 'desc'
        }

        # NOTE: 'Content-Length' and 'Cookie' are deliberately NOT hard-coded
        # here — requests computes the correct Content-Length for each payload
        # (a wrong fixed value can corrupt the request), and the session
        # cookie is obtained dynamically by login() instead of being a stale
        # copy-pasted value.
        self.headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Host': 'zhaopin.chinasoftinc.com',
            'Origin': 'http://zhaopin.chinasoftinc.com',
            'Referer': 'http://zhaopin.chinasoftinc.com/jsp/views/applicant/positionVacant.jsp',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }

        self.login()
        self.scrapy()

    def login(self):
        """Log into the site and capture the session cookies into self.cookie.

        WARNING(security): the account credentials below are hard-coded in
        source; they should be moved to a config file or environment
        variables.
        """
        headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Host': 'zhaopin.chinasoftinc.com',
            'Origin': 'http://zhaopin.chinasoftinc.com',
            'Referer': 'http://zhaopin.chinasoftinc.com/jsp/views/login/login.jsp',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        r = requests.post(
            'http://zhaopin.chinasoftinc.com/common/login?random=0.22912874985639053',
            data={
                'account': '13735863577',
                'pwd': '1991303017'
            },
            headers=headers)

        self.cookie.update(r.cookies)

    def scrapy(self):
        """Fetch every page of postings and insert new ones into MongoDB.

        A posting is considered new when no document with the same
        (name, company, location) triple exists in the collection.
        """
        # 'nd' mimics JavaScript's Date.now(): MILLIseconds since the epoch.
        # (int(time.time()) alone would be seconds, off by a factor of 1000.)
        self.data['nd'] = int(time.time() * 1000)

        # First request only discovers how many pages exist.
        r = requests.post(self.url, params=self.params, data=self.data,
                          headers=self.headers, cookies=self.cookie)
        j = r.json()

        # jqGrid responses report 'total' as the number of PAGES.
        for page in range(1, j['total'] + 1):
            self.data['page'] = page
            # Fix: pass cookies here too — the original paginated request
            # went out unauthenticated.
            r = requests.post(self.url, params=self.params, data=self.data,
                              headers=self.headers, cookies=self.cookie)
            j = r.json()

            for job in j['rows']:
                url = 'http://zhaopin.chinasoftinc.com/jsp/views/applicant/positionVacant.jsp'
                name = job['jobName']
                location = job['workPlace']
                count = job['recruitNumber']
                edu = ''
                date = job['publishTime']
                description = '岗位职责\n' + job['jobDuty'] + '\n任职资格\n' + job['jobQualifications']
                item = {
                    "url": url,  # URL of the job-detail page
                    'edu': edu,  # minimum education requirement
                    'exp': [],  # required experience, e.g. [3, 5] = 3-5 years, [3] = 3 years, [] = none
                    'name': name,  # job title *
                    'date': date,  # publish date as a string; normalized later
                    'lang': '',  # language requirement
                    'place': '',  # exact office address
                    'major': '',  # required major/field of study
                    'count': count,  # number of openings
                    'salary': [],  # monthly salary, e.g. [5000, 8000] = 5k-8k, [4000] = 4k, [] = unspecified
                    'toSchool': True,  # campus recruitment flag; always True for this official-site scrape
                    'welfare': [],  # benefits (insurance, 13th-month pay, ...) as a list
                    'funType': '',  # functional category of the role
                    'company': self.company,  # company name
                    'location': location,  # city
                    'industry': 'IT互联科技行业',  # company industry
                    'keywords': [],  # search keywords for this posting
                    'platform': 'offical',  # always 'offical' for official-site scrapes
                    'searchKeyword': '',  # search term used; empty for official-site scrapes
                    'description': description,  # full description: duties, qualifications, etc.
                    'subIndustry': '',  # always an empty string
                    'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # scrape timestamp
                }
                print(item['company'], item['name'])

                # Insert only if this (name, company, location) triple is new.
                if not offical_posts_coll.find_one({'name': item['name'], 'company': item['company'], 'location': item['location']}):
                    offical_posts_coll.insert_one(item)


POST()
