#!/usr/bin/python
# -*- coding:utf-8 -*-
# -------------------------------------------------------
# @author  : 龍皓
# @time    : 2025/2/25 21:52
# @function: crawler for Boss ZhiPin (zhipin.com) job listings — fetches search
#            results and job details, stores new jobs in the database, and
#            dumps the scraped data to JSON files.
# @version : 1.0.0
# -------------------------------------------------------
import random
import re
import time

import pymysql

from core.db.db_pool import DatabasePool
from core.db.models import Jobs
from core.util.logger import setup_logger
from DrissionPage import Chromium
from core.util.request_util import RequestUtil
from core.util.file_util import FileUtil



class BossZhiPinCrawler:
    """Crawler for Boss ZhiPin (zhipin.com) job listings.

    Drives a Chromium tab via DrissionPage to capture the search API's
    JSON packets and job-detail pages, persists previously-unseen jobs
    through the project's DatabasePool session, and dumps the scraped
    data to JSON files.
    """

    def __init__(self):
        self.logger = setup_logger()
        self.request_util = RequestUtil()

        # Register pymysql as MySQL's DBAPI driver.
        pymysql.install_as_MySQLdb()
        # Initialize the database connection pool and grab a session.
        db_pool = DatabasePool()
        db_pool.initialize()
        self.db = db_pool.get_session()  # database session

        # Browser tab object (default mode is browser automation).
        self.driver = Chromium().latest_tab
        self.driver.headers = self.request_util.get_headers()

    def search_jobs(self, post_name: str, city: str, page: int = 1):
        """Load one search-result page and return the captured JSON body.

        :param post_name: query keyword, e.g. "python"
        :param city: Boss ZhiPin numeric city code, e.g. "101010100"
        :param page: 1-based result page number
        :return: body of the captured joblist.json response packet
        """
        # Start listening for the data packet BEFORE issuing the request.
        self.driver.listen.start("/wapi/zpgeek/search/joblist.json")
        url = f"https://www.zhipin.com/web/geek/job?query={post_name}&city={city}&page={page}&pageSize=30"
        self.driver.get(url)
        self.logger.debug(f"catching page {page} ...")
        # Block until the packet content has loaded.
        resp = self.driver.listen.wait()
        # Return the packet payload.
        return resp.response.body

    def clean_html(self, html_text: str) -> str:
        """Strip <div> wrappers and convert <br> tags to newlines.

        :param html_text: raw HTML fragment of a job description
        :return: plain text with div tags removed and line breaks kept
        """
        # Remove opening and closing <div ...> tags in a single pass.
        cleaned_text = re.sub(r'</?div[^>]*>', '', html_text)
        # FIX: also match self-closing forms (<br/>, <br />); the original
        # pattern ('<br>' only) left those tags in the output text.
        cleaned_text = re.sub(r'<br\s*/?>', '\n', cleaned_text)

        self.logger.debug(cleaned_text)
        return cleaned_text

    def pipeline(self, job_info: dict) -> None:
        """Persist new jobs from a search response and dump them to JSON.

        :param job_info: decoded joblist.json payload; jobs are read from
            job_info['zpData']['jobList'] (schema assumed from usage here
            — verify against the live API)
        """
        job_list = []
        # FIX: was a bare print(); route through the logger like the
        # rest of the class.
        self.logger.debug(job_info['zpData']['jobList'])
        for job in job_info['zpData']['jobList']:
            link = f"https://www.zhipin.com/job_detail/{job['encryptJobId']}.html"
            # Deduplicate on job name + detail link.
            existing_job = self.db.query(Jobs).filter_by(jobName=job['jobName'],
                                                         link=link).first()
            if existing_job:
                continue  # already stored — skip

            # Fetch the job description HTML.
            details = self.get_job_details(job["encryptJobId"])
            self.logger.debug(details)
            job_data = {
                "jobName": job['jobName'],
                "salaryDesc": job['salaryDesc'],
                "jobLabels": str(job['jobLabels']),
                "jobDegree": job['jobDegree'],
                "cityName": job['cityName'],
                "brandName": job['brandName'],
                "brandScaleName": job['brandScaleName'],
                "brandStageName": job['brandStageName'],
                "areaDistrict": job['areaDistrict'],
                "brandIndustry": job['brandIndustry'],
                "link": link,
                "desc": self.clean_html(details),
                "channel_id": 1
            }
            job_list.append(job_data)
            self.db.add(Jobs(**job_data))  # stage for commit

            # Random 1-5 s delay to reduce anti-scraping pressure.
            time.sleep(random.uniform(1, 5))
        self.db.commit()  # flush all staged rows

        # Save scraped data to JSON files.
        FileUtil.write_json_file('jobs.json', job_list)
        FileUtil.write_json_file('job_info.json', job_info['zpData']['jobList'])
        self.logger.info("保存完成")

    def get_job_details(self, job_id: str) -> str:
        """Return the raw HTML of a job's description section.

        :param job_id: encrypted job id (encryptJobId from the search API)
        :return: HTML of the first ".job-sec-text" element, or "" if absent
        """
        job_url = f"https://www.zhipin.com/job_detail/{job_id}.html"
        self.logger.debug(job_url)
        self.driver.get(job_url)
        detail = self.driver.eles(".job-sec-text")
        # FIX: guard against a missing description section instead of
        # raising IndexError; downstream clean_html handles "" fine.
        return detail[0].html if detail else ""



if __name__ == '__main__':
    # Sample run: first result page of "python" jobs in Beijing (101010100).
    bot = BossZhiPinCrawler()
    first_page = bot.search_jobs("python", "101010100")
    print(first_page)