import json
import logging
import os
import pdb
import random
import re
import time

from DrissionPage import Chromium, ChromiumOptions
from dotenv import load_dotenv

from database import Session, Job, get_db_session, CHROME_PATH, SEARCH_KEY

load_dotenv()  # 默认加载当前目录下的.env文件

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


class Spider:
    """Crawl job postings from liepin.com with DrissionPage.

    For every job card on the search-result pages the spider opens the
    detail page in a new tab, scrapes the fields, and upserts a ``Job``
    row (keyed by ``job_id``) through the project's ``get_db_session``.
    """

    def __init__(self):
        # Browser executable path comes from project configuration.
        self.chrome_path = CHROME_PATH
        self._setup_browser()

    def _setup_browser(self):
        """Configure the browser path and attach to a Chromium instance.

        Raises:
            Exception: re-raised after logging if the browser cannot start.
        """
        try:
            # Persist the browser path so Chromium() picks it up.
            ChromiumOptions().set_browser_path(self.chrome_path).save()
            self.browser = Chromium()
            # All list-page work happens in the most recent tab.
            self.tab = self.browser.latest_tab
        except Exception as e:
            logger.error(f"_setup_browser爬虫运行失败:{e}")
            raise

    def get_job_detail(self, card, company_name):
        """Click a job card and scrape its detail page from the new tab.

        :param card: list-page card element to click.
        :param company_name: company name already scraped from the card.
        :return: dict with keys ``description``, ``job_id``, ``job_name``,
                 ``experience``, ``education``, ``city``, ``company`` —
                 or None on any failure.
        """
        try:
            # Random 5-10 s delay before clicking to look human and avoid
            # triggering anti-scraping rate limits.
            delay = random.uniform(5, 10)
            logger.info(f"等待 {delay:.2f} 秒后点击卡片...")
            self.tab.wait(delay)

            # Clicking the card opens the detail page in a new tab; record
            # the tab count first so we can detect the new one.
            original_tab_count = self.browser.tabs_count
            card.click()

            new_tab = self.wait_for_new_tab(original_tab_count, timeout=10)
            if new_tab is None:
                logger.warning("点击后没有打开新标签页")
                return None

            self.browser.activate_tab(new_tab)
            new_tab.wait(5)  # Give the detail page time to render.

            try:
                job_intro_ele = new_tab.ele(
                    "@data-selector=job-intro-content", timeout=10
                )
                description = job_intro_ele.text if job_intro_ele else ""

                job_name_ele = new_tab.ele("@class:job-title", timeout=10)
                job_name = job_name_ele.text if job_name_ele else ""

                city = ""
                experience = ""
                education = ""
                properties_ele = new_tab.ele(".job-properties", timeout=10)
                if properties_ele:
                    # Spans without the 'split' divider class hold the values,
                    # in page order: [0]=city, [1]=experience, [2]=education.
                    spans = properties_ele.eles("css:span:not(.split)")
                    if len(spans) > 0:
                        city = spans[0].text
                    if len(spans) > 1:
                        experience = spans[1].text
                    if len(spans) > 2:
                        education = spans[2].text

                return {
                    "description": description,
                    # BUG FIX: a stray trailing comma previously turned this
                    # value into a one-element tuple in the working dict.
                    "job_id": self.extract_job_id_from_url(new_tab.url),
                    "job_name": job_name,
                    "experience": experience,
                    "education": education,
                    "city": city,
                    "company": company_name,
                }
            except Exception as e:
                logger.error(f"在新标签页中获取职位信息失败: {e}")
                return None
            finally:
                # Always close the detail tab and return to the list tab.
                try:
                    new_tab.close()
                    self.browser.activate_tab(self.tab)
                except Exception as e:
                    logger.error(f"关闭标签页时出错: {e}")

        except Exception as e:
            logger.error(f"get_job_detail方法执行失败: {e}")
            return None

    def extract_job_id_from_url(self, url):
        """Extract the numeric job id from a detail-page URL.

        Example: ``https://www.liepin.com/job/1975275873.shtml?...`` yields
        ``"1975275873"``.

        :param url: detail-page URL.
        :return: the id as a string; falls back to ``str(hash(url))`` when
                 the URL does not match the expected pattern, so the caller
                 always gets a usable (if synthetic) key.
        """
        pattern = r"/.+/(\d+)\.shtml"
        match1 = re.search(pattern, url)
        if match1:
            return match1.group(1)
        else:
            # No id found — use the URL's hash as a surrogate id.
            return str(hash(url))

    def wait_for_new_tab(self, original_count, timeout=10):
        """Wait until a new tab has opened and navigated to a real URL.

        :param original_count: tab count recorded before the click.
        :param timeout: maximum seconds to wait.
        :return: the new tab object, or None on timeout.
        """
        start_time = time.time()
        last_count = original_count

        while time.time() - start_time < timeout:
            current_count = self.browser.tabs_count
            if current_count > original_count:
                # A tab appeared: give it a moment to finish opening.
                time.sleep(1.0)
                new_tab = self.browser.latest_tab
                # Accept it only once it has left about:blank.
                if new_tab.url and new_tab.url != "about:blank":
                    return new_tab
                logger.info("检测到新标签页但URL为空，继续等待...")
            elif current_count == last_count:
                # No change yet: poll slowly.
                time.sleep(0.2)
            else:
                # Count changed in some other way: remember it, poll quickly.
                last_count = current_count
                time.sleep(0.1)

        logger.warning(f"等待新标签页超时 ({timeout}秒)")
        return None

    def crawl_jobs(self):
        """Paginate through the search results, upserting every job found.

        Each card is handed to :meth:`get_job_detail`; the resulting dict is
        inserted as a new ``Job`` row or merged into the existing row with
        the same ``job_id``.
        """
        logger.info("开始爬取猎豹上的职位信息")
        # Listen to the search API endpoint (captured traffic is available
        # via self.tab.listen if ever needed).
        self.tab.listen.start("/api/com.liepin.searchfront4c.pc-search-job")
        self.tab.get(
            "https://www.liepin.com/zhaopin/?inputFrom=c_index&workYearCode=0&key=AI%E5%89%8D%E7%AB%AF&scene=input&ckId=xju9kg19w73la3yv2t8wlxmyq9sq2eva&"
        )
        card_selector = "@class:job-card-pc-container"
        try:
            self.tab.wait.eles_loaded(card_selector, timeout=30)
        except Exception as e:
            logger.error(f"等待职位卡片加载失败:{e}")
            raise

        # --- Paginated crawl ---
        page_num = 1
        while True:
            logger.info(f"--- 开始处理第 {page_num} 页 ---")

            # Wait for the current page's cards, then collect them.
            self.tab.wait.eles_loaded(card_selector, timeout=20)
            cards = self.tab.eles(card_selector)

            if not cards:
                logger.warning(f"第 {page_num} 页没有找到职位卡片，爬取结束。")
                break

            logger.info(f"在第 {page_num} 页找到 {len(cards)} 个职位。")

            for card in cards:
                try:
                    company_name_ele = card.ele("@class:company-name", timeout=5)
                    # get_job_detail already stores this company name under
                    # the "company" key, so no post-hoc overwrite is needed.
                    job_data = self.get_job_detail(card, company_name_ele.text)

                    if job_data:
                        with get_db_session() as session:
                            # Upsert keyed on job_id.
                            existing_job = (
                                session.query(Job)
                                .filter_by(job_id=job_data["job_id"])
                                .first()
                            )

                            if existing_job:
                                logger.info(
                                    f"更新职位: {job_data['job_id']} {job_data['company']}"
                                )
                                for key, value in job_data.items():
                                    setattr(existing_job, key, value)
                            else:
                                logger.info(
                                    f"插入新职位: {job_data['job_id']} {job_data['company']}"
                                )
                                session.add(Job(**job_data))
                    else:
                        logger.warning("获取职位数据失败")

                except Exception as e:
                    # Keep crawling the remaining cards even if one fails.
                    logger.error(f"处理职位卡片时出错: {e}")
                    continue

            logger.info(f"--- 第 {page_num} 页处理完毕 ---")

            # The "next page" control is li.ant-pagination-next; it carries
            # aria-disabled="true" on the last page.
            next_page_button = self.tab.ele("css:li.ant-pagination-next", timeout=5)

            if next_page_button and next_page_button.attr("aria-disabled") == "false":
                logger.info("找到'下一页'按钮，准备点击...")
                next_page_button.click()
                page_num += 1
                # The eles_loaded wait at the top of the loop covers page load.
            else:
                logger.info("未找到可点击的'下一页'按钮，爬取结束。")
                break

def run_spider():
    """Entry point: build a Spider and crawl, logging any fatal error."""
    try:
        Spider().crawl_jobs()
    except Exception as e:
        logger.error(f"crawl_jobs爬虫运行失败:{e}")
        raise


# Allow running the crawler directly: `python <this file>`.
if __name__ == "__main__":
    run_spider()
