# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import datetime
import json
from pymongo import MongoClient

# MongoDB handles.  NOTE(review): both clients below connect to the same
# local MongoDB instance ('127.0.0.1' and 'localhost' are the same host),
# so the "server" and "local" collections are in fact the same collection
# -- confirm whether a remote server address was intended here.
server_client = MongoClient('127.0.0.1', 27017)

server_db = server_client['knx_posts_db']
# NOTE(review): "offical" is a typo for "official", but it is the persisted
# collection name -- do not rename without migrating the stored data.
offical_posts_coll = server_db['offical_posts_coll']

local_client = MongoClient('localhost', 27017)
local_db = local_client['knx_posts_db']
local_offical_posts_coll = local_db['offical_posts_coll']


class Spider:
    """Scrape job postings from the IHG careers site (www.ihgjobs.cn) and
    insert each posting not already stored into the module-level
    ``offical_posts_coll`` MongoDB collection.

    The crawl starts immediately from the constructor.

    NOTE(review): the ``website`` constructor argument is stored but never
    used -- every request below targets a hard-coded ihgjobs.cn URL.
    Confirm whether it is vestigial.
    """

    def __init__(self, website):
        self.company = "洲际酒店"      # company name (InterContinental Hotels Group)
        self.industry = '服务业-酒店'  # industry label (services / hotels)
        self.website = website
        # Query string for the paginated job-list endpoint:
        # "iDisplayStart" is the record offset, "iDisplayLength" the page size.
        self.param = {
            "sSearch_1": "||9314,9315|||",
            "sSearch_2": "",
            "ReqType": 2,
            "sEcho": 1,
            "iColumns": 20,
            "iDisplayStart": 0,
            "iDisplayLength": 20,
            "sSortCol_0": 0,
            "sSortDir_0": "asc"
        }

        # Initial paging bound; replaced by the server-reported
        # iTotalDisplayRecords after the first response.
        self.pages = 21
        self.curPage = 1
        self.urls = []
        self.fails = []  # URLs whose detail fetch or DB insert failed
        self.getUrls()   # crawl immediately on construction

    # Error handling: remember the failing URL so it can be reported at the end.
    def error(self, url):
        self.fails.append(url)

    def log(self, data):
        """No-op logging stub.

        NOTE(review): the original (disabled) implementation opened
        'log.txt' without a write mode ('w'/'a'), so it would have failed
        on write; left as a no-op until logging is actually needed.
        """
        pass

    # Crawl all job-list pages and persist each job's detail record.
    def getUrls(self):
        """Page through the job list; for every listed job fetch its detail
        record, build a normalized posting dict and insert it into MongoDB
        unless a posting with the same (name, company, location) exists.

        Bug fixed vs. the original: when the server reported
        ``iTotalDisplayRecords`` <= the current offset (e.g. zero results),
        the original ``else: pass`` branch never advanced ``iDisplayStart``,
        so the loop refetched the same page forever.  We now process that
        final page and then stop.
        """
        search_url = 'http://www.ihgjobs.cn/lumesse/jobsearch.aspx'
        while self.param["iDisplayStart"] < self.pages:
            res = requests.get(search_url, params=self.param)
            listing = json.loads(res.text)
            jobs = listing['jobList']

            if listing["iTotalDisplayRecords"] > self.param["iDisplayStart"]:
                # Advance to the next page and adopt the server's true total.
                self.param["iDisplayStart"] += self.param["iDisplayLength"]
                self.pages = listing["iTotalDisplayRecords"]
                print(self.param["iDisplayStart"])
                print(self.pages)
                last_page = False
            else:
                # No records beyond the current offset: handle this page's
                # items (if any), then stop instead of looping forever.
                last_page = True

            for item in jobs:
                detail_param = {
                    "reqType": 5,
                    "sEcho": 9,
                    "postingtargetid": item['jobID']
                }
                res_detail = requests.get(search_url, params=detail_param)
                res_detail.encoding = "utf-8"
                if res_detail:  # requests.Response truthiness == status is OK
                    detail = json.loads(res_detail.text)
                    job_desc = detail['jobDescription']

                    # Flatten the list of {heading: text} custom fields into
                    # one description string.
                    description = ""
                    for section in job_desc["customFields"]:
                        for heading in section:
                            description += heading + "\n\n" + section[heading]

                    if 'province' in job_desc and 'jobOrg' in job_desc:
                        place = job_desc['province'] + job_desc['jobOrg']
                    else:
                        place = ''

                    data = {
                        # str() guards against the API returning jobID as an
                        # int, which would make the concatenation raise.
                        "url": "http://www.ihgjobs.cn/search/job-description/" + str(item['jobID']),  # detail-page URL
                        'edu': '',  # minimum education (not provided by this site)
                        'exp': [],  # required experience, e.g. [3, 5] = 3-5 years, [3] = 3 years, [] = none
                        'name': item["jobTitle"],  # job title
                        'date': item["postdate"],  # posting date as a string (normalized later)
                        'lang': '',  # language requirement
                        'place': place,  # office address (province + organization)
                        'major': '',  # required major
                        'count': '',  # number of openings
                        'salary': [],  # [5000, 8000] = 5-8k/month, [4000] = 4k, [] = unspecified
                        'toSchool': True,  # official-site scrapes are all campus recruiting
                        'welfare': [],  # benefits (insurance, 13th-month pay, ...)
                        # .get(): some postings may lack 'jobGrade'; default to ''
                        # rather than crashing (consistent with the place guard).
                        'funType': job_desc.get('jobGrade', ''),  # job function / grade
                        'company': self.company,  # company name
                        'location': item["location"],  # city
                        'industry': self.industry,  # company industry
                        'keywords': [],  # search keywords for this posting
                        'platform': 'offical',  # fixed marker for official-site scrapes
                        'searchKeyword': '',  # always empty for official-site scrapes
                        'description': description,  # full description: duties, requirements, ...
                        'subIndustry': '',  # always empty
                        'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # scrape time
                    }

                    print(data['company'], data['name'])
                    # De-duplicate on (name, company, location) before inserting.
                    if not offical_posts_coll.find_one({'name': data['name'], 'company': data['company'], 'location': data['location']}):
                        reid = offical_posts_coll.insert_one(data)
                        if not reid:
                            self.error(res_detail.url)
                else:
                    self.error(res_detail.url)

            if last_page:
                break
        print(self.fails)


pass  # no-op leftover; has no effect at module level

# Starts the crawl as a module-level side effect (Spider.__init__ calls
# getUrls).  NOTE(review): the URL passed here is a Deloitte careers page,
# but Spider ignores its `website` argument and always scrapes ihgjobs.cn
# -- confirm whether this argument is vestigial or a copy-paste leftover.
sp = Spider("http://deloitte.wintalent.cn/wt/Deloitte/web/index/webPosition210!getPostListByConditionShowPic?columnId=1&operational=73a64e2ef2c35f601b19180f12270d548f6e959893dce91d7cf7865ce19e13a12fd5e66083b7e0fdba23aa9fdce156d7cfc3d5af195bb08f95d887a6b2a43e0d1585487ae444d8331e40d7a8d25b9caaedfeaaa1792ce0aacb48a0120fa571bf009b963acb1134ba")

# sp = Spider("http://www.1kkk.com/manhua-jpkr/?sort=1")
