# -*- coding: utf-8 -*-

# Third-party (requires installation); used for sending HTTP requests
import urllib.parse
import json
import requests
# Third-party (requires installation); used for parsing HTML data
from lxml import etree
# Regular expressions
import re

from spider_server.conf.config_util import ConfigUtil
from spider_server.email_smtp.email_util import EmailSMTP
from spider_server.logs.logger import Logger
from spider_server.message_sms.message_util import TencentSMS
from spider_server.scheduler.scheduler_db import SchedulerDB
from spider_server.sxggzp_spider.sxggzp_db import SxggzpDB

"""
1 准备URL列表
2 遍历URL,发送请求,获取响应数据
3 解析数据
4 保存数据
"""
logger = Logger(__name__).get_log()


class SxggzpServer(object):
    """Spider for Shaanxi public recruitment notices.

    Fetches three listing pages (state-owned enterprises, public
    institutions, recruitment news), extracts each notice's name, time
    and URL, and persists new notices — firing email/SMS alerts for
    them when the scheduler job config enables those channels.
    """

    def __init__(self):
        """Load the target URLs from config and prepare request headers."""
        # State-owned enterprise (国企) recruitment listing URL
        self.url_gq = ConfigUtil().get("URL", "URL_SXGGZP_GYQY")
        # Public institution (事业单位) recruitment listing URL
        self.url_sy = ConfigUtil().get("URL", "URL_SXGGZP_SYDW")
        # Recruitment news (招聘快讯) listing URL
        self.url_kx = ConfigUtil().get("URL", "URL_SXGGZP_ZPKX")
        # Impersonate a desktop Chrome browser to avoid trivial bot blocking
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
        }

    def run(self):
        """Program entry point: fetch, parse and persist every category."""
        # (url, notice_type) pairs: "1" = state-owned enterprise,
        # "2" = public institution, "3" = recruitment news.
        for url, notice_type in ((self.url_gq, "1"),
                                 (self.url_sy, "2"),
                                 (self.url_kx, "3")):
            page = self.get_page_from_url(url)
            datas = self.get_datas_from_page(page, notice_type)
            self.save_data(datas)

    def get_page_from_url(self, url):
        """GET *url* and return the response body as decoded text.

        :param url: listing-page URL to fetch.
        :return: response body as ``str``, decoded with the encoding
            detected by requests (``apparent_encoding``).
        """
        # NOTE(security): verify=False disables TLS certificate checking;
        # presumably kept because the target site has certificate issues —
        # confirm before removing.
        response = requests.get(url, headers=self.headers, verify=False)
        # Bug fix: the old code returned response.content.decode(), which
        # always decodes as UTF-8 and silently ignored the detected
        # encoding set just above — producing mojibake for GBK/GB2312
        # pages. response.text honours response.encoding.
        response.encoding = response.apparent_encoding
        return response.text

    def get_datas_from_page(self, page, notice_type):
        """Parse a listing *page* into a list of notice dicts.

        :param page: HTML text of a listing page.
        :param notice_type: category tag stored with each notice
            ("1" state-owned, "2" institution, "3" news).
        :return: list of dicts with keys ``notice_name``,
            ``notice_time``, ``notice_url`` and ``notice_type``.
        """
        # Convert the page to an Element so XPath can be used on it.
        element = etree.HTML(page)
        # One <div> per notice under the content-middle container.
        divs = element.xpath("//*[@class='content-middle']/div")
        data_list = []
        for div in divs:
            # ./ means relative to this node; // collects all nested text.
            names = div.xpath("./a//text()")
            times = div.xpath("./div//text()")
            urls = div.xpath("./a/@href")
            # Robustness fix: skip decorative/incomplete rows instead of
            # crashing the whole crawl with an IndexError on [0].
            if not (names and times and urls):
                continue
            data_list.append({
                "notice_name": names[0].strip(),
                "notice_time": times[0].strip(),
                "notice_url": urls[0],
                "notice_type": notice_type,
            })
        return data_list

    def save_data(self, datas):
        """Persist new notices and send email/SMS alerts for them.

        :param datas: notice dicts as produced by
            :meth:`get_datas_from_page`.
        """
        # The scheduler job config is loop-invariant; fetch it lazily at
        # most once per batch instead of once per new notice.
        job_detail = None
        for data in datas:
            # Short category label used in alert subjects.
            notice_type = "国" if data["notice_type"] == "1" else "事" if data["notice_type"] == "2" else "讯"
            # Check whether this notice is already stored.
            count = SxggzpDB().get_exam_count(data["notice_name"])
            if count != 0:
                continue
            # New notice: log, alert (if enabled), then store.
            logger.info("考试通知--->%s" % data["notice_name"])
            if job_detail is None:
                job_detail = SchedulerDB().get_job_detail("sxggzp_job")
            content = "标题：%s \n 时间：%s \n 链接：%s" % (
                    data["notice_name"], data["notice_time"], data["notice_url"])
            # Email notification
            if job_detail["email_notice"] == "1":
                EmailSMTP().send_email("陕西公共ZP-%s" % notice_type, content.replace("招聘", "ZP"))
            # SMS notification
            if job_detail["message_notice"] == "1":
                TencentSMS().send_message("陕西公共ZP-%s" % notice_type)
            # Persist so the notice is not reported again.
            SxggzpDB().save_exam_data(data)

    def get_exam_lists(self, name):
        """Return the stored exam-notice rows matching *name*."""
        return SxggzpDB().get_exam_lists(name)


if __name__ == '__main__':
    # Allow running this spider directly as a script for a one-off crawl.
    SxggzpServer().run()
