import logging
import os
from pprint import pprint
from urllib.parse import quote

from DrissionPage import Chromium, ChromiumOptions
from dotenv import load_dotenv

load_dotenv()  # Load overrides from ./.env in the current directory, if present.

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Chrome binary DrissionPage should launch (macOS default location).
# NOTE: the previous default ended with a trailing space, which made the
# fallback path invalid; it has been stripped.
CHROME_PATH = os.getenv(
    "CHROME_PATH", "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
)
# Search keyword used to build the BOSS直聘 query URL.
SEARCH_KEY = os.getenv("SEARCH_KEY", "AI前端")


class Spider:
    """Crawl job listings from zhipin.com (BOSS直聘) for SEARCH_KEY.

    Drives a local Chrome instance through DrissionPage, captures the
    detail.json packets the site fetches per job, and prints one dict
    per job card found on the search-results page.
    """

    def __init__(self):
        # Chrome binary path, taken from the environment (or the default).
        self.chrome_path = CHROME_PATH
        self._setup_browser()

    def _setup_browser(self):
        """Point DrissionPage at the configured Chrome binary and grab a tab.

        Raises:
            Exception: re-raised from DrissionPage if the browser fails to start.
        """
        try:
            # Persist the browser path so the Chromium() call below picks it up.
            ChromiumOptions().set_browser_path(self.chrome_path).save()
            self.browser = Chromium()
            # Operate on the most recently opened tab.
            self.tab = self.browser.latest_tab
        except Exception:
            # Name the real failing step (the old message was copy-pasted
            # "spider run failed"); exception() keeps the traceback.
            logger.exception("浏览器初始化失败")
            raise

    def get_job_detail(self, card):
        """Extract one job's fields from a listing card element.

        Reads the rendered card for the basic fields and the captured
        detail.json network packet for the full job description.

        Args:
            card: a DrissionPage element for one ".job-card-box".

        Returns:
            dict with keys job_name, company, city, experience, education,
            description ("" for missing tags or when no packet was captured).
        """
        try:
            job_name = card.ele(".job-name").text
            company = card.ele("@|class=boss-info@|class=boss-name").text
            city = card.ele(".company-location").text
            # Tag list holds <li> items: experience first, education second.
            # Guard the indexing — some cards carry fewer tags.
            tags = card.ele(".tag-list").eles("tag:li")
            experience = tags[0].text if len(tags) > 0 else ""
            education = tags[1].text if len(tags) > 1 else ""
            # listen.wait() returns False on timeout rather than a packet,
            # so bound the wait and guard before dereferencing .response.
            packet = self.tab.listen.wait(timeout=30)
            description = ""
            if packet:
                body = packet.response.body or {}
                description = (
                    body.get("zpData", {})
                    .get("jobInfo", {})
                    .get("postDescription", "")
                )
            else:
                logger.warning("未捕获到职位详情数据包")
            return {
                "job_name": job_name,
                "company": company,
                "city": city,
                "experience": experience,
                "education": education,
                "description": description,
            }
        except Exception:
            logger.exception("解析职位卡片失败")
            raise

    def crawl_jobs(self):
        """Open the BOSS search results for SEARCH_KEY and print each job."""
        logger.info("开始爬取BOSS上的职位信息")
        # Start capturing the detail.json XHRs before navigating.
        self.tab.listen.start("/wapi/zpgeek/job/detail.json")
        # city=100010000 is the site's "nationwide" filter. The query now
        # honors SEARCH_KEY instead of a hard-coded, pre-encoded keyword.
        self.tab.get(
            "https://www.zhipin.com/web/geek/jobs"
            f"?query={quote(SEARCH_KEY)}&city=100010000"
        )
        try:
            self.tab.wait.eles_loaded(".job-card-box", timeout=30)
        except Exception:
            logger.exception("等待职位卡片加载失败")
            raise
        # All job-card elements currently rendered on the results page.
        cards = self.tab.eles(".job-card-box")
        if not cards:
            logger.warning("页面上未找到职位卡片")
            return
        for card in cards:
            pprint(self.get_job_detail(card))


def run_spider():
    """Entry point: build a Spider, crawl, and log-and-re-raise any failure."""
    try:
        spider = Spider()
        spider.crawl_jobs()
    except Exception:
        # exception() records the full traceback, which error(f"...") dropped.
        logger.exception("爬虫运行失败")
        raise


if __name__ == "__main__":
    # Run the spider only when executed as a script, not on import.
    run_spider()
