import logging
import os
from DrissionPage import Chromium, ChromiumOptions
from dotenv import load_dotenv
from database1 import get_db_session, Job

load_dotenv()  # Load environment variables from the .env file in the current directory.

# Module-wide logging: timestamped records at INFO level and above.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Path to the Chrome binary; overridable via the CHROME_PATH env var.
# The default assumes a standard macOS installation.
CHROME_PATH = os.getenv(
    "CHROME_PATH", "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
)


class Spider:
    """Crawls job listings from zhaopin.com and upserts them into the database."""

    def __init__(self):
        # Chrome binary path resolved at import time (env var or macOS default).
        self.chrome_path = CHROME_PATH
        self._setup_browser()

    def _setup_browser(self):
        """Point DrissionPage at the configured Chrome binary and open a tab.

        Raises:
            Exception: re-raises whatever DrissionPage raises when the
                browser cannot be started.
        """
        try:
            # Persist the browser path so the Chromium() call below picks it up.
            ChromiumOptions().set_browser_path(self.chrome_path).save()
            # Create the browser object.
            self.browser = Chromium()
            # Work on the most recently opened tab.
            self.tab = self.browser.latest_tab
        except Exception as e:
            logger.error(f"浏览器初始化失败: {e}")
            raise  # bare raise preserves the original traceback

    def crawl_jobs(self):
        """Open the search page, capture the job-list API response, upsert each job.

        Starts a network listener for the ``/c/i/search/positions`` request
        before navigating, clicks the search button to trigger it, then writes
        every job in the captured payload to the DB (update when the job_id
        already exists, insert otherwise).
        """
        logger.info("开始爬取职位信息")
        self.tab.wait(1)
        # Start listening BEFORE navigating so the request fired by the
        # page load / button click is not missed.
        self.tab.listen.start("/c/i/search/positions")
        self.tab.get("https://www.zhaopin.com/sou/jl765/kw010G0IAI9LTEU/p1")
        buttons = self.tab.eles(".query-search__content-button")
        if not buttons:
            # Fail with a descriptive error instead of a bare IndexError.
            raise RuntimeError("未找到搜索按钮: .query-search__content-button")
        search_button = buttons[0]
        logger.debug("search_button: %s", search_button)
        search_button.click()
        self.tab.wait(1)
        # Block until one job-list response packet is captured.
        job_list_result = self.tab.listen.wait()
        job_list_data = job_list_result.response.body
        job_list = job_list_data.get("data", {}).get("list", [])
        # Number of jobs written to the DB so far.
        processed_count = 0
        for item in job_list:
            job_data = {
                "job_id": item.get("jobId", ""),
                "job_name": item.get("name", ""),
                "company": item.get("companyName", ""),
                "city": item.get("jobRootOrgInfo", {}).get("cityName"),
                "experience": item.get("workingExp", ""),
                "education": item.get("education", ""),
                "description": item.get("jobSummary", ""),
                "match_result": "",
            }
            with get_db_session() as session:
                # Upsert: update the row in place if it exists, else insert.
                existing = (
                    session.query(Job).filter_by(job_id=job_data["job_id"]).first()
                )
                if existing:
                    session.query(Job).filter(
                        Job.job_id == job_data["job_id"]
                    ).update(job_data)
                else:
                    session.add(Job(**job_data))
            self.tab.wait(1)  # throttle between items
            processed_count += 1
        logger.info(f"已经处理完了{processed_count}个职位")
        self.tab.wait(1)


def main():
    """Entry point: build the spider and run one crawl pass.

    Raises:
        Exception: re-raises any failure after logging it with its traceback.
    """
    try:
        spider = Spider()
        spider.crawl_jobs()
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception(f"爬虫运行失败: {e}")
        raise  # bare raise keeps the original traceback intact


# Run one crawl pass when executed as a script (not on import).
if __name__ == "__main__":
    main()
