# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import datetime

from pymongo import MongoClient

# Remote MongoDB used as the primary sink for scraped postings.
# NOTE(review): the server address is hard-coded and the connection is
# unauthenticated — presumably a private/dev instance; confirm and move
# to configuration before wider use.
server_client = MongoClient('47.104.130.19', 27017)

server_db = server_client['knx_posts_db']
server_offical_posts_coll = server_db['offical_posts_coll']

# Local MongoDB mirror of the same database/collection layout.
# NOTE(review): `local_offical_posts_coll` is never written to in this
# file — only the server collection is used by Spider.detail().
local_client = MongoClient('localhost', 27017)
local_db = local_client['knx_posts_db']
local_offical_posts_coll = local_db['offical_posts_coll']


class Spider:
    """Scrape Deloitte campus-recruitment job postings and store them in MongoDB.

    The crawl starts immediately on construction: ``__init__`` calls
    :meth:`getUrls`, which pages through the listing endpoint, collects
    detail-page URLs, and then calls :meth:`detail` to parse each posting
    and insert it into ``server_offical_posts_coll``.
    """

    def __init__(self, website):
        """Configure the crawl and start it.

        :param website: POST endpoint of the wintalent job-listing page.
        """
        self.company = "德勤"
        self.industry = '服务业-金融'
        self.website = website
        # Form payload expected by the wintalent listing endpoint.
        # Only "pc.currentPage" is varied while paging; the remaining
        # fields mirror what the site's own search form submits.
        self.param = {
            "pc.currentPage": 1,
            "pc.rowSize": 10,
            "orgId": '',
            "releaseTime": '',
            "keyWord": '',
            "positionType": '',
            "trademark": 1,
            "workPlace": '',
            "useForm": '',
            "positionName": '',
            "recruitType": 1,
            "specialRecruitmentId": '',
            "brandCode": 1,
            "searchType": 1,
            "blockType": 1,
            "contentModuleType": '',
            "tagType": '',
            "comPart": '',
            "orderInt": 0,
            "workPlaceNum": '',
            "workTypeNum": '',
            "workCompanyNum": '',
            "colPoitionName": 'a',
            "colPositionType": '',
            "colPositionCompany": '',
            "colPostionRecruit": '',
            "colPositionWorkPlace": 'e',
            "colPostionReleaseTime": '',
            "positionNameLength": 430,
            "positionTypeLength": '',
            "postionCompanyLength": '',
            "positionRecruitLength": '',
            "positionWorkPlaceLength": '',
            "postionReleaseTimeLength": '',
            "positionColUseDefault": 1,
            "keyWordV": '',
            "workPlaceNameV": '',
            "comPartV": '',
            "sicCorpCodeV": '',
            "sort_order": "by_time"
        }
        self.pages = 5   # provisional page count; corrected from page 1's pager
        self.curPage = 1
        self.urls = []   # detail-page URLs collected from the listing pages
        self.fails = []  # URLs that failed to fetch, parse, or insert
        self.getUrls()

    # error handling
    def error(self, url):
        """Record a failed URL so it can be logged at the end of the crawl."""
        self.fails.append(url)

    def log(self, data):
        """Write the failed URLs to log.txt as one space-separated line.

        Fix: the original (commented-out) code opened the file in the
        default read mode, which cannot write and fails if the file does
        not exist; 'w' mode creates/truncates it.
        """
        with open('log.txt', 'w') as file:
            file.write(' '.join(data))

    # spider all urls
    def getUrls(self):
        """Page through the listing endpoint, collecting detail-page URLs.

        On the first page the real page count is read from the pager's
        last anchor, whose href looks like ``javascript:...,<count>``.
        When done, hands off to :meth:`detail`.
        """
        page = 1
        while page <= self.pages:
            self.param["pc.currentPage"] = page

            # timeout guards against the request hanging indefinitely
            response = requests.post(self.website, data=self.param, timeout=30)
            print(response.url)
            response.encoding = "utf-8"
            if response.status_code == 200:
                document = BeautifulSoup(response.text, "html5lib")

                # Determine the total page count from page 1's pager.
                if page == 1:
                    # Fix: pageCount was previously unbound (NameError)
                    # when the pager had no anchors or an empty href.
                    pageCount = None
                    a_el = document.find(class_="page_div").find('table').find_all('a')
                    if len(a_el) > 0:
                        href = a_el[-1]['href']
                        if len(href) > 0:
                            arry = href.split(',')
                            print(arry[1])
                            pageCount = arry[1]

                    if pageCount:
                        self.pages = int(pageCount)
                    else:
                        self.pages = 5

                # Every result row is an <a> inside the search_result box.
                liObjs = document.find(class_="search_result").find_all("a")
                if liObjs:
                    for item in liObjs:
                        href = item["href"]
                        if href:
                            self.urls.append("http://deloitte.wintalent.cn" + href)
                        else:
                            print("a href is null")
                else:
                    print("-----error:" + response.url + "-----")
            else:
                print("-----error:" + response.url + "-----")

            page = page + 1
        print(len(self.urls))
        self.detail()

    # get content
    def detail(self):
        """Fetch each collected detail page, parse the posting, and insert
        it into the remote MongoDB collection. Failures are recorded via
        :meth:`error` and logged at the end instead of aborting the crawl.
        """
        for _url in self.urls:
            response = requests.get(_url, timeout=30)
            if response.status_code != 200:
                self.error(response.url)
                continue

            document = BeautifulSoup(response.text, "html5lib")
            try:
                name = document.find(class_="position_title").find('span').get_text().strip()
                # detail info: the fixed-position <li> fields of the basics box
                box = document.find(class_='position_basic clear').find_all('li')

                edu = box[5].find('font')['title']
                date = box[7].find_all('span')[1].get_text().strip()
                count = box[4].find_all('span')[1].get_text().strip()
                location = box[3].find_all('span')[2].get_text().strip()

                funType = ''
                # description: first <p> is the job duties, second the requirements
                pTag = document.find(class_='position_content').find_all("p")
                description = "工作内容/职位描述：\n\n"+pTag[0].get_text().strip()+"任职资格：\n\n"+pTag[1].get_text().strip()
            except (AttributeError, IndexError, KeyError, TypeError):
                # Fix: a single malformed/changed page no longer crashes the
                # whole crawl — record it and move on, like other failures.
                self.error(response.url)
                continue

            data = {
                "url": _url,  # detail-page URL
                'edu': edu,  # minimum education requirement
                'exp': [],  # required experience, e.g. [3, 5] = 3-5 years, [3] = 3 years, [] = none
                'name': name,  # position title *
                'date': date,  # posting date as a string; normalized later
                'lang': '',  # language requirement
                'place': '',  # exact office address
                'major': '',  # required major
                'count': count,  # number of openings
                'salary': [],  # salary range, e.g. [5000, 8000]; [] = unspecified
                'toSchool': True,  # campus recruitment — always True for this site
                'welfare': [],  # benefits (insurance, 13th-month pay, ...)
                'funType': funType,  # functional category
                'company': self.company,  # company name
                'location': location,  # city
                'industry': self.industry,  # company industry
                'keywords': [],  # search keywords for this posting
                'platform': 'offical',  # always 'offical' for official-site crawls
                'searchKeyword': '',  # always empty for official-site crawls
                'description': description,  # full description: duties + requirements
                'subIndustry': '',  # always empty
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # crawl time
            }

            # insert into the data DB.
            # Fix: Collection.insert was deprecated in pymongo 3 and
            # removed in pymongo 4; insert_one is the replacement.
            result = server_offical_posts_coll.insert_one(data)
            if not result.inserted_id:
                self.error(response.url)

        self.log(self.fails)

        

# Script entry point: start the crawl only when executed directly, so
# importing this module for its Spider class has no network side effects.
if __name__ == "__main__":
    sp = Spider("http://deloitte.wintalent.cn/wt/Deloitte/web/index/webPosition210!getPostListByConditionShowPic?columnId=1&operational=73a64e2ef2c35f601b19180f12270d548f6e959893dce91d7cf7865ce19e13a12fd5e66083b7e0fdba23aa9fdce156d7cfc3d5af195bb08f95d887a6b2a43e0d1585487ae444d8331e40d7a8d25b9caaedfeaaa1792ce0aacb48a0120fa571bf009b963acb1134ba")