import logging
import os
from urllib.parse import quote

from DrissionPage import Chromium, ChromiumOptions
from dotenv import load_dotenv

from database import get_db_session, Job

load_dotenv()  # 默认加载当前目录下的.env文件

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

CHROME_PATH = os.getenv(
    "CHROME_PATH", "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
)


class Spider:
    """Crawl job postings from zhipin.com with a locally installed Chrome.

    Uses DrissionPage to drive the browser and to intercept the site's
    ``detail.json`` XHR responses, then upserts each job into the database
    via the project's session helper.
    """

    def __init__(self):
        # Browser executable path comes from the CHROME_PATH constant
        # (env-overridable, see module level).
        self.chrome_path = CHROME_PATH
        self._setup_browser()

    def _setup_browser(self):
        """Launch Chromium at the configured path and keep its latest tab.

        Raises:
            Exception: re-raised (with original traceback) if startup fails.
        """
        try:
            # Persist the browser path so Chromium() picks it up.
            ChromiumOptions().set_browser_path(self.chrome_path).save()
            self.browser = Chromium()
            # Operate on the most recently opened tab.
            self.tab = self.browser.latest_tab
        except Exception as e:
            logger.error(f"浏览器初始化失败: {e}")
            # Bare raise preserves the original traceback ("raise e" restarts it here).
            raise

    def get_job_detail(self, card):
        """Extract one job's fields from a listing card and the intercepted
        ``detail.json`` response for that card.

        Args:
            card: DrissionPage element for one ``.job-card-box``; the card
                must already have been clicked so the detail request fires.

        Returns:
            dict: job_id, job_name, company, city, experience, education,
            description.

        Raises:
            Exception: re-raised after logging if any extraction step fails.
        """
        try:
            job_name = card.ele(".job-name").text
            logger.info(job_name)
            company = card.ele("@|class=boss-info@|class=boss-name").text
            city = card.ele(".company-location").text
            # First two tags on the card are experience and education.
            tags = card.ele(".tag-list").eles("tag:li")
            experience = tags[0].text
            education = tags[1].text
            # Block until the detail.json packet for this card arrives.
            detail_result = self.tab.listen.wait()
            detail_data = detail_result.response.body
            job_info = detail_data.get("zpData", {}).get("jobInfo", {})
            return {
                "job_id": job_info.get("encryptId", ""),
                "job_name": job_name,
                "company": company,
                "city": city,
                "experience": experience,
                "education": education,
                "description": job_info.get("postDescription", ""),
            }
        except Exception as e:
            logger.error(f"爬虫运行失败:{e}")
            raise

    def crawl_jobs(self, query="AI前端", city="100010000"):
        """Crawl every job card for *query* in *city* and persist each one.

        Repeatedly scrolls the results page; stops when a scroll produces
        no new cards. Defaults reproduce the original hard-coded search
        ("AI前端", nationwide city code 100010000).

        Args:
            query: search keyword; URL-encoded automatically.
            city: zhipin.com city code string.
        """
        logger.info("开始爬取职位信息")
        # Intercept the job-detail XHR fired when a card is clicked.
        self.tab.listen.start("/wapi/zpgeek/job/detail.json")
        self.tab.get(
            f"https://www.zhipin.com/web/geek/jobs?query={quote(query)}&city={city}"
        )
        try:
            self.tab.wait.eles_loaded(".job-card-box", timeout=30)
        except Exception as e:
            logger.error(f"等待职位卡片加载失败:{e}")
            raise
        # Number of cards already handled in previous passes.
        processed_count = 0
        while True:
            cards = self.tab.eles(".job-card-box")
            current_count = len(cards)
            # No new cards appeared after the last scroll: crawl finished.
            if current_count == processed_count:
                logger.info(f"已经处理完了所有的职位，共{processed_count}个")
                break
            for card in cards[processed_count:]:
                card.click()
                self.tab.wait(1)
                job_data = self.get_job_detail(card)
                if job_data:
                    with get_db_session() as session:
                        # Upsert: UPDATE the existing row if present (single
                        # query, replaces the old SELECT-then-UPDATE pair),
                        # otherwise INSERT a new one.
                        updated = (
                            session.query(Job)
                            .filter(Job.job_id == job_data["job_id"])
                            .update(job_data)
                        )
                        if not updated:
                            session.add(Job(**job_data))
                self.tab.wait(1)
            processed_count = current_count
            logger.info(f"已经处理完了{processed_count}个职位")
            # Scroll to the bottom so the page lazy-loads the next batch.
            self.tab.scroll.to_bottom()
            self.tab.wait(1)


def main():
    """Entry point: build a Spider and crawl job listings.

    Logs the full traceback on failure and re-raises so the process exits
    non-zero.
    """
    try:
        spider = Spider()
        spider.crawl_jobs()
    except Exception:
        # logger.exception records the traceback; bare raise preserves it
        # ("raise e" would restart the traceback from this frame).
        logger.exception("爬虫运行失败")
        raise


if __name__ == "__main__":
    main()
