# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
import datetime
import re
from pymongo import MongoClient
from pyquery import PyQuery as pq

# MongoDB connections.  NOTE(review): both clients point at the local host
# ('127.0.0.1' and 'localhost' are the same machine) and the same database,
# so the "server" and "local" collections are effectively identical here —
# presumably one of them is meant to be a remote host; confirm before deploy.
server_client = MongoClient('127.0.0.1', 27017)

server_db = server_client['knx_posts_db']
# Collection that scraped postings are written to (sic: "offical").
offical_posts_coll = server_db['offical_posts_coll']

local_client = MongoClient('localhost', 27017)
local_db = local_client['knx_posts_db']
# NOTE(review): only referenced from commented-out code below — appears unused.
local_offical_posts_coll = local_db['offical_posts_coll']


class Spider:
    """Scrape job postings from the BTH Hotels (如家) recruitment site into MongoDB.

    Instantiating the class immediately crawls the listing page given by
    ``website``, collects every job-detail URL and scrapes each one via
    :meth:`detail`.  URLs that fail are collected in ``self.fails`` and
    retried exactly once.
    """

    def __init__(self, website):
        self.company = "如家"
        self.industry = '服务业-酒店'
        self.website = website  # listing-page URL to crawl
        self.param = {}         # reserved for request parameters (currently unused)
        self.pages = 5          # intended total page count (currently unused)
        self.curPage = 1        # current page cursor (currently unused)
        self.urls = []          # collected job-detail URLs
        self.fails = []         # URLs that failed and are queued for one retry
        self.getUrls()          # kick off the crawl immediately

    # record a failed url for later retry
    def error(self, url):
        """Append *url* to the failure list so it can be retried once."""
        self.fails.append(url)

    # error handling: retry failed urls a single time
    def errorHandel(self, urls):
        """Retry the given failed URLs exactly once.

        BUG FIX: the old version re-entered ``detail`` with ``self.fails``
        itself, and ``detail`` unconditionally called back into this method —
        a URL that kept failing caused unbounded recursion.  Now the failure
        list is snapshotted and cleared, and the retry pass runs with
        ``retry=False`` so it cannot trigger another retry.
        """
        print('deal with :' + str(len(urls)))
        print(urls)

        if urls:
            retry_urls = list(urls)  # snapshot: ``urls`` may be self.fails
            self.fails = []          # new failures during the retry land here
            self.detail(retry_urls, retry=False)

    # collect every job-detail url from the listing page
    def getUrls(self):
        """Fetch the listing page, collect all job-detail URLs, then scrape them."""
        response = requests.get(self.website)
        print(response.url)
        response.encoding = "utf-8"

        if response.status_code == 200:
            document = BeautifulSoup(response.text, "html5lib")

            # each <li> inside the posting list holds one job link;
            # guard against a missing container instead of raising AttributeError
            pos_list = document.find(class_="posList")
            liObjs = pos_list.find_all('li') if pos_list else None

            if liObjs:
                for item in liObjs:
                    href = item.find_all("a")[0]["href"]
                    if href:
                        # hrefs are site-relative; prefix the host
                        self.urls.append("http://www.bthhotels.com" + href)
                    else:
                        print("a href is null")
            else:
                print("-----error:" + response.url)
        else:
            print("-----error:" + response.url)

        print(self.urls)
        print(len(self.urls))
        self.detail(self.urls)

    # scrape each detail page and store new postings
    def detail(self, urls, retry=True):
        """Scrape every job-detail URL in *urls* and insert new postings into MongoDB.

        Parameters
        ----------
        urls : list of detail-page URLs.
        retry : when True (the default, matching the old behavior), URLs that
            failed are retried once via :meth:`errorHandel` after the main
            pass; the retry pass itself runs with ``retry=False``.
        """
        # iterate over a copy: ``urls`` may be self.fails, which
        # self.error() appends to while we loop
        for _url in list(urls):
            response = requests.get(_url)
            if response.status_code != 200:
                self.error(response.url)
                print("-----error:" + response.url)
                continue

            document = BeautifulSoup(response.text, "html5lib")
            jQeury = pq(response.text)
            name = document.find(class_="head").get_text().strip()

            # the four <li> items hold (in order): location, experience,
            # education, headcount — presumably; verify against the live page
            box = document.find(class_='pos_command').find_all('li')
            location = box[0].find(class_='sec').get_text().strip()
            expValue = box[1].find(class_='sec').get_text().strip()
            exp = re.findall(r"\d+\.?\d*", expValue)  # e.g. "3-5年" -> ['3', '5']
            edu = box[2].find(class_='sec').get_text().strip()
            count = box[3].find(class_='sec').get_text().strip()

            date = ''
            funType = ''
            # description = duty paragraphs + qualification paragraphs
            description = ("工作内容/职位描述：\n\n"
                           + jQeury(".duty p").text().strip()
                           + "任职资格：\n\n"
                           + jQeury(".qualification p").text().strip())

            data = {
                "url": _url,            # detail-page address of the JD
                'edu': edu,             # minimum education level
                'exp': exp,             # experience required: [3, 5] = 3-5 years, [3] = 3 years, [] = none
                'name': name,           # position title *
                'date': date,           # posting date as a string; normalized later
                'lang': '',             # language requirement
                'place': '',            # exact office address
                'major': '',            # major/field requirement
                'count': count,         # number of openings
                'salary': [],           # pay: [5000, 8000] = 5-8k/month, [4000] = 4k, [] = unspecified
                'toSchool': True,       # campus recruiting; always True for this site
                'welfare': [],          # benefits (insurance, 13th-month pay, ...) as a list
                'funType': funType,     # job function type
                'company': self.company,    # company name
                'location': location,       # city
                'industry': self.industry,  # company industry
                'keywords': [],             # search keywords for this position
                'platform': 'offical',      # always 'offical' for official-site scrapes
                'searchKeyword': '',        # always empty for official-site scrapes
                'description': description, # full JD: duties, qualifications, ...
                'subIndustry': '',          # always empty
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # scrape time
            }

            # de-duplicate on (name, company, location) before inserting
            query = {'name': data['name'], 'company': data['company'],
                     'location': data['location']}
            if not offical_posts_coll.find_one(query):
                reid = offical_posts_coll.insert_one(data)
                if reid:
                    print("success:" + response.url)
                else:
                    self.error(response.url)

        if retry:
            self.errorHandel(self.fails)

    # standalone single-page scraper (manual/debug variant of detail())
    def formatHtml(self, _url):
        """Scrape one detail page and insert it if not already stored.

        Variant of :meth:`detail` that parses the description from the
        ``Txt`` <p> tags instead of pyquery selectors.
        """
        response = requests.get(_url)
        # BUG FIX: status_code is an int; the old comparison against the
        # string '200' was always False, so this method never parsed anything.
        if response.status_code == 200:
            document = BeautifulSoup(response.text, "html5lib")

            name = document.find(class_="head").get_text().strip()
            # detail info — same <li> layout as in detail()
            box = document.find(class_='pos_command clearfix').find_all('li')
            location = box[0].find(class_='sec').get_text().strip()
            expValue = box[1].find(class_='sec').get_text().strip()
            exp = re.findall(r"\d+\.?\d*", expValue)
            edu = box[2].find(class_='sec').get_text().strip()
            count = box[3].find(class_='sec').get_text().strip()

            date = ''
            funType = ''
            # description: first <p> = duties, second <p> = qualifications
            pTag = document.find(class_='Txt').find_all("p")
            description = ("工作内容/职位描述：\n\n" + pTag[0].get_text().strip()
                           + "任职资格：\n\n" + pTag[1].get_text().strip())

            data = {
                "url": _url,            # detail-page address of the JD
                'edu': edu,             # minimum education level
                'exp': exp,             # experience required: [3, 5] = 3-5 years, [3] = 3 years, [] = none
                'name': name,           # position title *
                'date': date,           # posting date as a string; normalized later
                'lang': '',             # language requirement
                'place': '',            # exact office address
                'major': '',            # major/field requirement
                'count': count,         # number of openings
                'salary': [],           # pay: [5000, 8000] = 5-8k/month, [4000] = 4k, [] = unspecified
                'toSchool': True,       # campus recruiting; always True for this site
                'welfare': [],          # benefits (insurance, 13th-month pay, ...) as a list
                'funType': funType,     # job function type
                'company': self.company,    # company name
                'location': location,       # city
                'industry': self.industry,  # company industry
                'keywords': [],             # search keywords for this position
                'platform': 'offical',      # always 'offical' for official-site scrapes
                'searchKeyword': '',        # always empty for official-site scrapes
                'description': description, # full JD: duties, qualifications, ...
                'subIndustry': '',          # always empty
                'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')  # scrape time
            }

            print(data['company'], data['name'])

            query = {'name': data['name'], 'company': data['company'],
                     'location': data['location']}
            if not offical_posts_coll.find_one(query):
                # BUG FIX: the old code inserted the undefined name ``item``
                # (NameError); insert the assembled ``data`` document.
                offical_posts_coll.insert_one(data)
        else:
            self.error(response.url)
            print("-----error:" + response.url + "-----")


# Entry point: crawl the BTH Hotels recruitment listing.  Guarded so that
# importing this module no longer triggers network requests and MongoDB
# writes as a side effect (the old top-level call did).
if __name__ == "__main__":
    sp = Spider("http://www.bthhotels.com/Recruit/JobList")

    # sp = Spider("http://www.1kkk.com/manhua-jpkr/?sort=1")