# -*- coding:utf-8 -*-
#!/usr/bin/env python
# @description LaGou采集脚本文件
# @author:jack.spanrrows
# @datetime: 2018/07/31 00:20
# @copyRight jack.spanrrows@gmail.com

from datetime import datetime, timedelta
from pymongo import MongoClient as client
from urllib import request
import time
import pycurl
import json
from time import sleep
import ssl
from io import *
import lxml
from bs4 import BeautifulSoup
import os
import sys
import re
from scrapy_web_py3.librarys import MyThread
# NOTE(review): a 10,000,000 recursion limit effectively disables Python's
# stack-depth guard; nothing in this file recurses, so a much smaller value
# (or the default) would be safer — confirm before lowering.
sys.setrecursionlimit(10000000)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): BASE_DIR is appended *after* the `scrapy_web_py3` import above
# has already succeeded, so it only affects the imports performed further down.
sys.path.append(BASE_DIR)
# Marker files used to detect which environment (testing vs online) this
# process runs in; the matching config module is imported below.
DOCKER_IDC_RC = "/www/web/IDC_RC"
IDC_RC = "/var/html/IDC_RC"
IDC_ONLINE = "/www/web/IDC_ONLINE"
db = True  # NOTE(review): placeholder, never rebound in this file — verify it is used elsewhere
constants = True  # placeholder; rebound to a config module in one of the branches below
HTML_PARSER = True  # True -> BeautifulSoup uses stdlib 'html.parser', otherwise 'lxml'
if os.path.isfile(DOCKER_IDC_RC) or os.path.isfile(IDC_RC):
    from scrapy_web_py3.config.testing import constants

    HTML_PARSER = True
elif os.path.isfile(IDC_ONLINE):
    from scrapy_web_py3.config.online import constants

    HTML_PARSER = True
# NOTE(review): if no marker file exists, `constants` stays the boolean
# placeholder and the class body below will fail at definition time — confirm
# deployments always ship one of the marker files.
from scrapy_web_py3.librarys import common
from scrapy_web_py3.librarys import MongoCache as mongo
from scrapy_web_py3.librarys import MysqlConnect as mysql
from scrapy_web_py3.librarys import Logger

# Bypass HTTPS certificate verification (make unverified contexts the
# default for urllib-based fetches).
if hasattr(ssl, '_create_unverified_context'):
    ssl._create_default_https_context = ssl._create_unverified_context
sys.stdout.flush()


class LagouSpider():
    """Scraper for lagou.com.

    Collects the job-category tree, paginated job-list pages, job links and
    company links.  Raw pages are cached in MongoDB (via ``mongo.MongoCache``)
    and parsed rows are written to MySQL (via ``mysql.MysqlConnect``).

    The public interface (method names, parameter names/defaults) is kept
    unchanged for existing callers.
    """

    def __init__(self, url = None, date = None, source = None, mongo_type = constants.GET, expires = None, mongo_key = None, mongo_value = None, type = constants.ADD_MIN_TIMESTAMP):
        # NOTE: `type` shadows the builtin but is kept for caller compatibility.
        self.url = url
        self.date = date
        self.source = source
        self.mongo_key = mongo_key
        self.mongo_value = mongo_value
        self.expires = expires
        self.mongo_type = mongo_type
        # BUGFIX: the `type` argument was previously never stored —
        # get_init_self_item__ assigned the *builtin* `type` to self.type,
        # so get_cache_html passed `<class 'type'>` to MongoCache.
        self.type = type
        self.get_init_self_item__()

    def get_cache_html(self):
        """Load ``self.html`` from the Mongo cache when mongo_type is GET."""
        if self.mongo_type == constants.GET:
            self.html = mongo.MongoCache(url = self.url, expires = self.expires, mongo_key = self.mongo_key, mongo_value = self.mongo_value, type = self.type).get_data()

    def get_init_self_item__(self):
        """Initialise derived attributes, log-file paths and the logger."""
        self.json_code = 'code'
        self.json_message = 'message'
        self.json_status = 'status'
        self.json_result = 'result'
        self.event = "[当前采集页面 -- %s]" % constants.LAGOU_URL
        self.run_logs_dir = constants.run_logs_dir
        self.checkDirExists()
        self.logs = Logger.Logger(constants.run_logs, 1, self.event)
        self.run_logs_file = constants.run_logs
        self.mongo_logs_file = constants.mongo_logs
        self.db_logs_file = constants.db_logs

    def checkDirExists(self):
        """Create the run-log directory if it does not exist yet."""
        common.check_dir_exists(self.run_logs_dir)

    def run_get_category(self, url=None, mongo_key=None):
        """Fetch the category index page (through the Mongo cache) and parse it.

        Args:
            url: page to fetch; defaults to ``constants.LAGOU_URL``.
            mongo_key: cache key; defaults to ``constants.LAGOU_KEY``.
        """
        self.url = constants.LAGOU_URL if url is None else url
        self.mongo_key = constants.LAGOU_KEY if mongo_key is None else mongo_key
        self.html = mongo.MongoCache(url=self.url, mongo_key=self.mongo_key).get_data(source=self.url, encoding="utf-8")
        self.get_category()

    def _make_soup(self):
        """Build a BeautifulSoup over ``self.html`` with the configured parser."""
        parser = 'html.parser' if HTML_PARSER is True else 'lxml'
        return BeautifulSoup(self.html, parser)

    def get_category(self):
        """Parse the category index page and persist the category tree.

        One-off job: inserts top-level categories, their sub-categories and
        each sub-category's job-list URL into MySQL.
        """
        soup = self._make_soup()
        for r in soup.find_all(class_=re.compile("menu_box")):
            up_category = r.find('h2').text
            content = str(up_category).replace("\n", '').replace("\t\t", '').replace(" ", '').replace("\n\n", '').strip()
            # BUGFIX: upId used to be unbound on the first skipped block
            # (NameError) or stale from the previous menu_box; track it
            # explicitly per top-level category.
            upId = None
            if content and content.find("\n") <= 0:
                # NOTE(review): scraped text is interpolated straight into SQL;
                # a quote in the category name breaks the statement.  Switch to
                # parameterized queries if MysqlConnect supports them.
                sql = "INSERT INTO %s(category) VALUES('%s')" % (constants.TABLE_CATEGORY, content)
                message = "新增数据表%s -- query -- %s" % (constants.TABLE_CATEGORY, sql)
                common.var_dump(message, self.db_logs_file)
                upId = mysql.MysqlConnect().save_one_data(sql)
                message = "新增数据表%s -- id -- %s" % (constants.TABLE_CATEGORY, upId)
                common.var_dump(message, self.db_logs_file)
            spans = r.find_all('span')
            spanList = [span.text for span in spans]
            # Original code re-evaluated r.find_all('dl') inside the span loop;
            # the net effect (empty when there are no spans) is preserved here.
            dl_list = r.find_all('dl') if spans else []
            for dl in dl_list:
                dl_href = dl.find_all('a')
                # every line of the <dl> text is a candidate sub-category name
                for span in dl.text.split('\n'):
                    if not span:
                        continue
                    span = str(span).strip()
                    if span not in spanList:
                        continue
                    if upId is None:
                        # parent category row was not inserted; skip instead of
                        # inserting with a wrong/unbound parent id
                        continue
                    sql = "INSERT INTO %s(category,up_id) VALUES('%s', '%s')" % (constants.TABLE_CATEGORY, span, upId)
                    message = "新增数据表%s -- query -- %s" % (constants.TABLE_CATEGORY, sql)
                    common.var_dump(message, self.db_logs_file)
                    lastId = mysql.MysqlConnect().save_one_data(sql)
                    message = "新增数据表%s -- id -- %s" % (constants.TABLE_CATEGORY, lastId)
                    common.var_dump(message, self.db_logs_file)
                    for href in dl_href:
                        sql = "INSERT INTO %s(category_name, cid, category_url) VALUES('%s', '%s', '%s')" % (constants.TABLE_CATEGORY_LIST, href.text, lastId, href['href'])
                        message = "新增数据表%s -- query -- %s" % (constants.TABLE_CATEGORY_LIST, sql)
                        common.var_dump(message, self.db_logs_file)
                        mysql.MysqlConnect().save_one_data(sql)

    def get_category_url(self):
        """Crawl the paginated job-list pages of every stored category URL.

        Reads category URLs from MySQL (id > 177) and fetches pages 1..29
        of each; pages already cached with status 2 can be re-crawled via
        the ERROR_OPEN branch.
        """
        self.error_page = []
        sql = "SELECT id, category_url FROM %s where id > 177" % constants.TABLE_CATEGORY_LIST
        data = mysql.MysqlConnect().get_fetch_all('%s' % sql)
        common.var_dump('query:%s' % sql, self.db_logs_file)
        ERROR_OPEN = False

        for item in data:
            common.var_dump("当前开始采集网址url: %s" % item[1])
            sleep(1)
            if ERROR_OPEN is False:
                for page in range(1, 30):
                    # pages beyond 21 use a different filterOption value
                    filter_option = 3 if page > 21 else 2
                    url = "%s%d/?filterOption=%d" % (item[1], page, filter_option)
                    common.var_dump("当前拼接url地址:%s" % url, self.run_logs_file)
                    sleep(2)
                    self.save_company_url(url, item[0])
            else:
                # re-crawl pages previously cached with status == 2 (failed)
                get_ids = mongo.MongoCache().find_more('status', 2, field_dict={'_id': 1})
                for _id in get_ids:
                    if item[1] in str(_id['_id']):
                        sleep(2)
                        url = str(_id['_id'])
                        thread1 = MyThread.MyThread(func=self.save_company_url, args=url , type=1, args2=item[0])
                        thread1.start()
                        thread1.join()
            common.var_dump("当前采集网址url: %s -- 采集完毕" % item[1])

    def _is_blocked(self):
        """True when the last fetch failed or returned the anti-bot redirect.

        BUGFIX: the old ``self.html.find('302 Found') > 0`` missed a match at
        position 0 and raised AttributeError when a retry returned None.
        """
        return self.html is None or self.html is False or '302 Found' in self.html

    def _cache_category_page(self, url, category_id, status, timestamp):
        """Persist the current ``self.html`` for a category page in Mongo.

        Args:
            url: page URL, also used as the Mongo ``_id``.
            category_id: owning category row id.
            status: 1 = fetched OK, 2 = blocked/failed.
            timestamp: UTC datetime used for the TTL index key.
        """
        # category pages are cached for 5 minutes (CATEGORY_MONGO_EXPIRES)
        mongo_dict = {constants.JOBS_MONGO_KEY: timestamp, 'add_date': common.get_init_time(), 'status': status, 'html': self.html, 'source': constants.SOURCE_URL, 'type': constants.CATEGORY_TYPE, 'category_id': category_id}
        mongo.MongoCache(url, constants.CATEGORY_MONGO_EXPIRES, constants.CATEGORY_MONGO_KEY, constants.CATEGORY_MONGO_VALUE).create_one_data(_id=url, data=mongo_dict)

    def save_company_url(self, url, id):
        """Fetch (or load from cache) one job-list page, cache the raw HTML
        in Mongo and persist the parsed job/company rows in MySQL.

        Args:
            url: full paginated job-list URL.
            id: category row id (name kept as ``id`` for caller compatibility).
        """
        get_mongo_data = mongo.MongoCache().find_one(key="_id", val=url, field_dict={'html': 1})
        message = "正在采集网址:%s" % url
        common.var_dump(message, self.run_logs_file)
        timestamp = datetime.utcnow()
        if get_mongo_data is None or get_mongo_data is False:
            self.html = common.curl_post(url, encoding="utf-8")
            if self._is_blocked():
                common.var_dump("当前网址采集失败: %s" % url)
                sleep(5)
                self.html = common.curl_post(url, encoding="utf-8")
                if self._is_blocked():
                    if url not in self.error_page:
                        self.error_page.append(url)
                    self._cache_category_page(url, id, 2, timestamp)
                else:
                    self._cache_category_page(url, id, 1, timestamp)
            else:
                self._cache_category_page(url, id, 1, timestamp)
        else:
            self.html = get_mongo_data['html']
            if self._is_blocked():
                sleep(3)
                self.html = common.curl_post(url, encoding="utf-8")
                if self._is_blocked():
                    if url not in self.error_page:
                        self.error_page.append(url)
                else:
                    self._cache_category_page(url, id, 1, timestamp)
        if self._is_blocked():
            if url not in self.error_page:
                self.error_page.append(url)
            return
        self._parse_list_page(id)

    def _parse_list_page(self, id):
        """Extract job and company links from ``self.html`` and persist them."""
        soup = self._make_soup()
        html_a = []
        for r in soup.find_all(class_=re.compile('item_con_list')):
            html_a = r.find_all('a')

        # BUGFIX: jobs_id used to be unbound (NameError) when a company link
        # appeared before any job link on the page.
        jobs_id = None
        for a in html_a:
            # skip anchors with no visible text (was a broken `is ""` check)
            if str(a['href']).find('jobs') > 0 and a.text:
                new_id = self._save_job(a, id)
                if new_id is not None:
                    jobs_id = new_id
            if str(a['href']).find('gongsi') > 0 and a.text:
                self._save_company(a, jobs_id)

    def _save_job(self, a, category_id):
        """Persist one job link to MySQL + Mongo; return the MySQL row id.

        Returns None when the anchor text cannot be parsed into
        "title\\ncity" parts.
        """
        http_url = a['href']
        job_text = str(a.text).strip().split("\n")
        if len(job_text) < 2:
            print(job_text)
            return None
        jobs = job_text[0]
        # second line is "[city·area]" or just "[city]"
        if job_text[1].find(constants.POINT) > 0 :
            city = job_text[1].split(constants.POINT)[0].replace('[', '')
            area = job_text[1].split(constants.POINT)[1].replace(']', '')
            locale_area = job_text[1]
        else:
            city = job_text[1].replace('[','').replace(']','')
            area = city
            locale_area = city
        update_time = common.get_date()
        field = "http_url, http_name, cid, city, area, locale_area, update_time"
        # NOTE(review): scraped values interpolated directly into SQL; quotes
        # in job titles will break/inject the statement.
        sql = "INSERT INTO %s(%s) VALUES('%s', '%s', '%s', '%s', '%s', '%s', '%s')" \
              % (constants.TABLE_JOBS_INFO_TMP, field, http_url, jobs, category_id, city, area, locale_area, update_time)
        duplicate_sql = " http_url=values(http_url), http_name=values(http_name), city=values(city), area=values(area), update_time=values(update_time)"
        sql = "%s %s %s" % (sql, constants.DUPLICATE_KEY_UPDATE, duplicate_sql)
        jobs_id = mysql.MysqlConnect().save_one_data(sql)
        common.var_dump("query:%s --- 返回id --- %s" % (sql, jobs_id), self.db_logs_file)
        timestamp = datetime.utcnow()
        # job pages expire after 48 hours in the Mongo cache
        mongo_dict = {'jobs': jobs, 'add_date': common.get_init_time(), constants.JOBS_MONGO_KEY : timestamp, 'status': 1, 'source': constants.SOURCE_URL, 'type': constants.JOBS_TYPE, 'jobs_id':jobs_id}
        mongo.MongoCache(http_url, constants.JOBS_MONGO_EXPIRES, constants.JOBS_MONGO_KEY, constants.JOBS_MONGO_VALUE).create_one_data(_id=http_url, data=mongo_dict)
        return jobs_id

    def _save_company(self, a, jobs_id):
        """Persist one company link and backfill company_id on the job row.

        Args:
            a: the <a> anchor pointing at a company page.
            jobs_id: row id of the most recently saved job, or None.
        """
        update_time = common.get_date()
        sql = "INSERT INTO %s(company_name, company_url, update_time) VALUES('%s', '%s', '%s')" % (constants.TABLE_COMPANY_INFO_TMP, a.text, a['href'], update_time)
        duplicate_sql = " company_name=values(company_name), company_url=values(company_url), update_time=values(update_time)"
        sql = "%s %s %s" % (sql, constants.DUPLICATE_KEY_UPDATE, duplicate_sql)
        company_id = mysql.MysqlConnect().save_one_data(sql)
        common.var_dump("query:%s -- 返回id -- %s" % (sql,company_id), self.db_logs_file)
        # company cache entries carry no TTL index (no expiry)
        mongo_dict = {'company':a.text,'add_date': common.get_init_time(), 'timestamp': int(time.time()), 'status': 1, 'company_id': company_id, 'source': constants.SOURCE_URL, 'type': constants.COMPANY_TYPE}
        mongo.MongoCache().create_one_data(_id=a['href'], data=mongo_dict)
        if jobs_id is not None:
            sql = "update %s SET company_id=%s WHERE id=%s " % (constants.TABLE_JOBS_INFO_TMP, company_id, jobs_id)
            mysql.MysqlConnect().mysql_execute(sql)
            common.var_dump("query:%s" % sql)

if __name__ == "__main__":
    # 第一步获取分类
    lagou_spider = LagouSpider()
    # lagou_spider.run_get_category(constants.LAGOU_URL, constants.LAGOU_KEY)
    lagou_spider.get_category_url()
    # 根据每个分类获取岗位，公司数据

