import logging
import os
from urllib.parse import quote

from DrissionPage import Chromium, ChromiumOptions
from dotenv import load_dotenv

from database import get_db_session, Job


load_dotenv()  # Load environment variables from the .env file in the current directory.

# Module-wide logging: timestamped INFO-level messages to stderr.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Path to the Chrome executable; defaults to the standard macOS install location.
CHROME_PATH = os.getenv(
    "CHROME_PATH", "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
)
# Search keyword for the job query; defaults to "AI前端" (AI front-end).
SEARCH_KEY = os.getenv("SEARCH_KEY", "AI前端")


class Spider:
    """Crawl job postings from 51job via a network-intercepting browser tab.

    Uses DrissionPage to open the search page, intercepts the search API
    response, and upserts each job posting into the database.
    """

    def __init__(self):
        self.chrome_path = CHROME_PATH
        self._setup_browser()

    def _setup_browser(self):
        """Initialize the Chromium browser and grab the latest tab.

        Raises:
            Exception: re-raised after logging if the browser cannot start.
        """
        try:
            # Persist the browser executable path so DrissionPage can find Chrome.
            ChromiumOptions().set_browser_path(self.chrome_path).save()
            # Create the Chromium browser object.
            self.browser = Chromium()
            # Grab the most recently opened tab.
            self.tab = self.browser.latest_tab
        except Exception as e:
            # Lazy %-style args avoid formatting cost when logging is disabled.
            logger.error("爬虫运行失败:%s", e)
            raise

    def get_job_detail(self):
        """Wait for the intercepted search response and persist each posting.

        Existing rows (matched by ``job_id``) are updated in place; new rows
        are inserted. Returns early (with an info log) when no response or no
        job items arrive.

        Raises:
            Exception: re-raised after logging on any persistence failure.
        """
        try:
            detail_result = self.tab.listen.wait()
            detail_data = detail_result.response.body
        except AttributeError:
            # listen.wait() produced no usable response object.
            logger.info("无响应数据")
            return

        try:
            # Navigate the response payload to the list of job items.
            job_list = detail_data.get("resultbody", {}).get("job", {}).get("items", [])

            if not job_list:
                logger.info("无职位数据")
                return

            # One session/transaction for the whole batch instead of opening
            # and closing a session per item.
            with get_db_session() as session:
                for item in job_list:
                    job_data = {
                        "job_id": item.get("jobId", ""),
                        "job_name": item.get("jobName", ""),
                        "company": item.get("fullCompanyName", ""),
                        "city": item.get("jobAreaLevelDetail", {}).get("cityString", ""),
                        "experience": item.get("workYearString", ""),
                        "education": item.get("degreeString", ""),
                        "description": item.get("jobDescribe", ""),
                    }

                    # Skip malformed entries that lack the primary identifier;
                    # storing them would create rows with an empty job_id.
                    if not job_data["job_id"]:
                        continue

                    job = (
                        session.query(Job)
                        .filter(Job.job_id == job_data["job_id"])
                        .first()
                    )
                    if job:
                        # Row exists: refresh mutable fields, never the key.
                        for key, value in job_data.items():
                            if hasattr(job, key) and key != "job_id":
                                setattr(job, key, value)
                    else:
                        # Row absent: insert a new job record.
                        session.add(Job(**job_data))

            logger.info("全部爬取成功")
        except Exception as e:
            logger.error("爬虫运行失败:%s", e)
            raise

    def crawl_jobs(self):
        """Start the API listener, open the search page, and persist results."""
        logger.info("开始爬取51job上的职位信息")
        # Intercept the search API call triggered by the page load.
        self.tab.listen.start("/api/job/search-pc")
        # Use the configured SEARCH_KEY (URL-encoded) instead of a
        # hard-coded keyword, so the env var actually takes effect.
        self.tab.get(f"https://we.51job.com/pc/search?keyword={quote(SEARCH_KEY)}")
        # Consume the intercepted response and store the jobs.
        self.get_job_detail()


def run_spider():
    """Entry point: construct a Spider and perform one crawl pass.

    Any failure is logged and re-raised so the caller can react.
    """
    try:
        crawler = Spider()
        crawler.crawl_jobs()
    except Exception as exc:
        logger.error(f"爬虫运行失败:{exc}")
        raise


# Allow the module to be executed directly as a one-shot crawler script.
if __name__ == "__main__":
    run_spider()
