# -*- coding:utf-8 -*-
import requests
import datetime
import re
from pymongo import MongoClient
from pyquery import PyQuery as pq

# MongoDB connections. NOTE(review): the "server" client and the "local"
# client both point at the same local instance (127.0.0.1 == localhost) —
# confirm whether server_client was meant to target a remote host.
server_client = MongoClient('127.0.0.1', 27017)

server_db = server_client['knx_posts_db']
offical_posts_coll = server_db['offical_posts_coll']  # (sic: "offical" matches the existing collection name)

# Local collection actually written to by Spider.detail().
local_client = MongoClient('localhost', 27017)
local_db = local_client['knx_posts_db']
local_offical_posts_coll = local_db['offical_posts_coll']


class Spider:
    """Scrape job postings from a hotjob.cn careers site and store one
    document per posting in the local MongoDB collection.

    Instantiating the class immediately starts the crawl (``__init__``
    calls ``getUrls()``).
    """

    def __init__(self, website):
        # Constant metadata attached to every scraped posting.
        self.company = "e洗袋"
        self.industry = '互联网-服务'
        self.website = website
        self.param = {}
        self.pages = 5
        self.curPage = 1
        self.urls = []    # detail-page urls collected from the list page
        self.fails = []   # urls whose fetch or insert failed this round
        # Retry bookkeeping: bounds the detail()/errorHandel() recursion so
        # a permanently failing url cannot cause infinite recursion.
        self.retries = 0
        self.maxRetries = 3
        self.getUrls()

    def error(self, url):
        """Record a failed url for a later retry."""
        self.fails.append(url)

    def errorHandel(self, urls):
        """Retry failed urls, at most ``self.maxRetries`` rounds.

        Fixes a bug in the original implementation: ``detail()`` ends by
        calling this method with ``self.fails``, and failing urls were
        re-appended to the same list, so one dead url caused unbounded
        mutual recursion with an ever-growing list.
        """
        print('deal with :' + str(len(urls)))
        print(urls)

        if not urls:
            return
        if self.retries >= self.maxRetries:
            # Give up: remaining urls stay in `urls` for the caller to inspect.
            print('giving up after ' + str(self.retries) + ' retries')
            return
        self.retries += 1
        pending = list(urls)
        # Reset so detail() records only this round's fresh failures.
        self.fails = []
        self.detail(pending)

    # spider all urls
    def getUrls(self):
        """Fetch the list page, collect all job detail-page urls into
        ``self.urls``, then scrape each of them via ``detail()``."""
        website = self.website

        try:
            # Timeout so a hung server cannot block the spider forever.
            response = requests.get(website, timeout=10)
        except requests.RequestException as exc:
            print("-----error:" + website + " " + str(exc))
            return

        print(response.url)
        response.encoding = "utf-8"
        # Only parse the page on HTTP 200.
        if response.status_code == 200:
            jQeury = pq(response.text)

            # Anchor tags inside the job-list box.
            liObjs = jQeury(".s-list-box .f14.text-darkgray a")
            if liObjs:
                for item in liObjs:
                    href = pq(item).attr("href")
                    if href:
                        # hrefs are site-relative; prepend the host.
                        self.urls.append("http://www.hotjob.cn" + href)
                    else:
                        print("a href is null")
            else:
                print("-----error:" + response.url)
        else:
            print("-----error:" + response.url)

        print("-------")
        print(self.urls)
        print(len(self.urls))
        self.detail(self.urls)

    # get content
    def detail(self, urls):
        """Scrape each job detail page in *urls* and insert one document
        per posting into the local MongoDB collection.  Failed urls are
        recorded via ``error()`` and retried through ``errorHandel()``."""
        for _url in urls:
            try:
                response = requests.get(_url, timeout=10)
            except requests.RequestException:
                # Connection-level failure: record and move on instead of
                # crashing the whole crawl.
                self.error(_url)
                continue

            if response.status_code != 200:
                self.error(response.url)
                print("-----error:" + response.url)
                continue

            jQeury = pq(response.text)

            name = jQeury(".f28.c333.mt30.ml20").text()

            # Two <ul> boxes hold the structured job attributes;
            # box1/box2 index into their <li> children below.
            box1 = jQeury(".c999.overhidden.lh24").eq(0).find("li")
            box2 = jQeury(".c999.overhidden.lh24").eq(1).find("li")

            edu = box1.eq(2).find('.c333').text()
            date = box2.eq(2).find('.c333').text()
            count = box2.eq(1).find('.c333').text()
            location = box1.eq(0).find('.c333').text()

            expValue = box1.eq(1).find('.c333').text()
            # e.g. "3-5年" -> ['3', '5']; empty list means no requirement.
            exp = re.findall(r"\d+\.?\d*", expValue)

            funType = box2.eq(0).find('.c333').text()
            # Description = job duties section + qualification section.
            description = "工作内容/职位描述：\n\n" + jQeury(".lh30.c666").eq(0).text().strip() + "任职资格：\n\n" + jQeury(".lh30.c666").eq(1).text().strip()

            data = {
                "url": _url,  # detail-page url
                'edu': edu.strip(),  # minimum education level
                'exp': exp,  # required experience, e.g. [3, 5] = 3-5 years, [3] = 3 years, [] = none
                'name': name.strip(),  # job title *
                'date': date.strip(),  # posting date as a string; normalized later
                'lang': '',  # language requirement
                'place': '',  # office address
                'major': '',  # required major
                'count': count.strip(),  # number of openings
                'salary': [],  # salary range, e.g. [5000, 8000]; [] = unspecified
                'toSchool': True,  # campus recruiting; always True for official-site scrapes
                'welfare': [],  # benefits (e.g. insurance, 13th-month pay), as a list
                'funType': funType.strip(),  # job function category
                'company': self.company,  # company name
                'location': location.strip(),  # city
                'industry': self.industry,  # company industry
                'keywords': [],  # search keywords for this posting
                'platform': 'offical',  # always 'offical' for official-site scrapes
                'searchKeyword': '',  # always empty for official-site scrapes
                'description': description.strip(),  # full description: duties, requirements, etc.
                'subIndustry': '',  # always empty
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # scrape time
            }

            # Skip pages that did not parse (no job title found).
            if data['name'] == '':
                continue

            # insert_one replaces Collection.insert, which was deprecated
            # in pymongo 3.0 and removed in 4.0.
            result = local_offical_posts_coll.insert_one(data)
            if result.inserted_id:
                print("success:" + response.url)
            else:
                self.error(response.url)

        self.errorHandel(self.fails)

# sp = Spider("http://www.hotjob.cn/wt/Transfar/web/index")
