"""
Author : Mr.Sun
DateTime : 2022/9/28.17:57
FileName : RecruitAction.py
Desc :  招聘信息采集
参考一下  https://www.w3cschool.cn/beautifulsoup4/beautifulsoup4-t2fh3fhz.html

"""
import os
import re
import sys
import json
import time
import requests
from bs4 import BeautifulSoup
from utils.logger import logger
from core.OperateSqlite import sqliteOperate
from core.KeyWordsOperate import KeyWordsOperate

# Project root: the parent of this file's directory.
project_dir = os.path.dirname(os.path.dirname(__file__))
# Scratch HTML path under <project>/temp. Not referenced anywhere in this
# module — presumably used by another module or left over from debugging;
# TODO(review): confirm before removing.
file_path = os.path.join(project_dir, 'temp/1.html')

# 获取所有符合条件的页面链接
"""
两种方式
1. 直接拼接
2. 通过页面获取 下一页获取
"""


def splicing_all_url(limit):
    """Build the listing-page URLs for the announcement index, oldest first.

    Page 1 lives at ``index.html``; pages 2..limit follow the site's
    ``3_<n>.html`` naming scheme. The list is reversed so the crawler
    visits the oldest listing page first.

    :param limit: number of listing pages to include (>= 1)
    :return: list of page URLs; an empty list on error (never ``None``,
             so callers can iterate the result safely)
    """
    try:
        url_list = ['https://www.chinagwy.org/html/zkgg/index.html']
        url_list.extend(
            "https://www.chinagwy.org/html/zkgg/3_" + str(i) + ".html"
            for i in range(2, limit + 1))
        url_list.reverse()
        return url_list
    except Exception as e:
        logger.error("get all url is occur error is :" + str(e))
        # Bug fix: the original fell through and implicitly returned None,
        # which position_together() then tried to iterate (TypeError).
        return []


# 根据条件获取具体页面的链接，并处理相关内容
def recruit_urls(base_url):
    """Fetch one listing page and extract the matching announcement links.

    Scrapes ``base_url``, keeps every ``<a>`` that has a ``title`` but no
    ``class`` attribute, then filters by exam type and title keywords via
    :class:`KeyWordsOperate`.

    :param base_url: URL of one announcement listing page
    :return: list of dicts with keys ``url``, ``title``, ``key_word``,
             ``publish_date``, ``exam_type``; empty list on error
             (never ``None``, so callers can iterate safely)
    """
    try:
        # timeout added so a stalled server cannot hang the crawler forever
        web_data = requests.get(url=base_url, timeout=30)
        web_data.encoding = "utf-8"
        bs = BeautifulSoup(web_data.text, "html.parser")
        # Announcement links carry a title attribute and no CSS class.
        all_urls = bs.find_all('a', attrs={"title": True, "class": False})

        # Grabs the inner text of a serialized tag ('>text<'); hoisted out
        # of the loop instead of recompiling per link.
        pat = re.compile(r'(?<=>).*(?=<)')
        result = []
        for link in all_urls:
            i_url = link.get('href')
            i_title = link.get('title')
            # Bug fix: `date` was unbound on the first link without a
            # next_sibling and stale (previous link's date) afterwards.
            date = ""
            i_date = link.next_sibling
            if i_date:
                date = ''.join(pat.findall(str(i_date))).replace(" ", "")
            i_type = link.parent.contents[0]
            if not i_type:
                continue
            exam_type = ''.join(pat.findall(str(i_type))).replace(
                " ", "").replace("【", "").replace("】", "")
            if not KeyWordsOperate().exam_type_match(exam_type):
                continue
            title_key = KeyWordsOperate().title_keys_match(str(i_title))
            if not title_key:
                continue
            result.append({
                "url": i_url,
                "title": i_title,
                "key_word": title_key,
                "publish_date": date,
                "exam_type": exam_type
            })
        logger.info(base_url + "  get match url and title is : " +
                    json.dumps(result, ensure_ascii=False))
        return result
    except Exception as e:
        # error level (was info), consistent with the other functions here
        logger.error(" recruit_urls occur Exception is : " + str(e))
        return []


# 聚合页面信息url
def position_together(page):
    """Crawl *page* listing pages and persist new announcements to SQLite.

    For each listing page produced by :func:`splicing_all_url`, extracts
    matching announcements via :func:`recruit_urls` and inserts the ones
    whose URL is not already present in ``job_information``.

    :param page: number of listing pages to crawl
    :return: None; results are written to the database and logged
    """
    try:
        url_sql = "select url from job_information;"
        insert_sql = ("insert into job_information"
                      "(exam_type,url,title,key_word,publish_at)"
                      " values(?,?,?,?,?)")
        # `or []` guards against a helper returning None on failure.
        for url in splicing_all_url(page) or []:
            # Perf fix: the original re-ran this SELECT for every single
            # announcement; fetch known URLs once per listing page and
            # keep the set current as we insert.
            url_database = {
                row[0] for row in sqliteOperate().search_data(url_sql)
            }
            for position_detail in recruit_urls(url) or []:
                if position_detail['url'] in url_database:
                    logger.info("position_together " + position_detail['url'] +
                                " is already exist.")
                    continue
                sqliteOperate().insert_data(
                    sql=insert_sql,
                    data=(position_detail['exam_type'],
                          position_detail['url'], position_detail['title'],
                          position_detail['key_word'],
                          position_detail['publish_date']))
                url_database.add(position_detail['url'])
                logger.info(
                    "insert job_information " +
                    json.dumps(position_detail, ensure_ascii=False) +
                    "  success.")
            # be polite to the server between listing pages
            time.sleep(2)
    except Exception as e:
        logger.error("position_together occur error is " + str(e))


if __name__ == '__main__':
    # Robustness fix: the original indexed sys.argv[1] unchecked, raising a
    # bare IndexError (no arg) or ValueError (non-numeric arg). Fail fast
    # with a usage hint instead.
    if len(sys.argv) < 2 or not sys.argv[1].isdigit():
        sys.exit("usage: python RecruitAction.py <page_limit>")
    position_together(int(sys.argv[1]))
