import json
import os
import re
import tempfile
import time
from multiprocessing.pool import ThreadPool
from typing import Set, Dict, Any, List

import fitz
import jieba
import requests
from loguru import logger
from multiprocessing import Process, Event, Manager

# 假设这些类和路径是可用的，保留导入
from application.ai_agent.problem_optimization import ProblemOptimization
from application.ai_agent.relevance_score import RelevanceScore
from application.browser.crawler_browser import CrawlerBrowser
from application.settings import KEY_WORD_PATH
from tenacity import retry, stop_after_attempt


class CrawlerTask:
    """Web-crawling and query-optimization task runner.

    Spawns one worker process per seed URL, recursively crawls child links
    (bounded by ``depth``/``breadth``), scores each page's relevance against
    the optimized query, and collects pages scoring above ``min_score`` into
    a shared queue until ``max_queue_size`` results are gathered.
    """

    # ------------------------------------------------------------

    def __init__(self, max_queue_size: int = 5, num_processes: int = 5, min_score: int = 60):
        """
        :param max_queue_size: stop crawling once this many results are queued
        :param num_processes: intended process parallelism (currently
            informational only — ``process`` spawns one process per seed URL)
        :param min_score: minimum relevance score for a page to be kept
        """
        manager = Manager()
        # Cross-process result list and URL de-dup map. Manager.dict() gives
        # O(1) membership tests that are visible to every worker process.
        self.queue: List[Dict[str, Any]] = manager.list()
        self.url_set: Dict[str, bool] = manager.dict()
        self.stop_event = Event()  # signals all workers/threads to stop

        self.max_queue_size = max_queue_size  # result-queue upper bound
        self.num_processes = num_processes  # kept for interface compatibility
        self.min_score = min_score  # relevance threshold
        self.thead_pool_max = 3  # per-process thread-pool size (name kept for compat)

        # Load the stop-word list. Strip line endings (robust against CRLF
        # files) and drop blank lines so an empty-string "stopword" never
        # silently filters empty tokens.
        try:
            with open(KEY_WORD_PATH + '/baidu_stopwords.txt', 'r', encoding='utf-8') as f:
                self.stopwords: Set[str] = {w.strip() for w in f if w.strip()}
        except FileNotFoundError:
            logger.error(f"停用词文件未找到: {KEY_WORD_PATH}/baidu_stopwords.txt")
            self.stopwords = set()

    # ------------------------------------------------------------

    def process(self, input_text: str, url_task_list: List[Dict[str, Any]]):
        """Optimize the query, tokenize it, fan out one crawler process per
        seed URL, then monitor until the queue fills or all workers exit.

        :param input_text: raw user query
        :param url_task_list: dicts with keys ``url``, ``depth``, ``breadth``
            and ``brower`` (truthy → crawl with plain requests)
        """
        logger.info(f"问题优化开始: {input_text}, url_task_list: {url_task_list}")

        # --- 1. query optimization and tokenization ---
        agent = ProblemOptimization()
        result = agent.run({"input_text": input_text})

        if not result.get('result'):
            # Optimization failed: fall back to the original query text.
            logger.warning(f"问题优化失败: {result.get('msg')}")
        else:
            input_text = result['data']['content']
            logger.info(f"问题优化成功: {input_text}")

        seg_list = list(jieba.cut(input_text, cut_all=False))
        new_seg_list: Set[str] = set(seg_list) - self.stopwords
        logger.info(f"分词去停用词成功: {new_seg_list}")

        # --- 2. spawn one worker process per seed URL ---
        processes = []
        for url_task in url_task_list:
            url = url_task['url']
            # De-duplicate seeds up front so two processes never crawl the
            # same root URL.
            if url in self.url_set:
                logger.warning(f"URL: {url} 已经在初始任务列表中重复，跳过。")
                continue

            # Mark this URL as claimed before the worker starts.
            self.url_set[url] = True

            p = Process(target=self.run_task, args=(
                input_text, new_seg_list, url, url_task['depth'],
                url_task['breadth'], url_task['brower'], 0, self.stop_event))
            processes.append(p)

        for p in processes:
            p.start()

        # --- 3. monitor until the queue fills or every worker exits ---
        try:
            while True:
                size = len(self.queue)
                alive_processes = sum(p.is_alive() for p in processes)
                logger.info(
                    f"[Monitor] 当前队列大小: {size} | 活跃进程数: {alive_processes}")

                # Queue full: leave the loop; the finally block signals stop.
                if size >= self.max_queue_size:
                    logger.info("[Monitor] 队列已满，触发关闭事件！")
                    break

                # All workers finished on their own.
                if alive_processes == 0:
                    break

                time.sleep(3)
        finally:
            # Signal every worker/thread to stop (idempotent), then force
            # the processes down and join() them so no zombies are left.
            self.stop_event.set()
            logger.info("[Main] 停止事件已触发，正在终止所有子进程。")
            for p in processes:
                try:
                    p.terminate()  # force-stop the worker
                    p.join(timeout=5)  # reap it to avoid a zombie process
                except Exception as exc:
                    logger.warning(f"终止子进程失败: {exc}")
            logger.info(f"[Main] 进程全部结束，共收集 {len(self.queue)} 条结果。")

    # ------------------------------------------------------------

    @retry(stop=stop_after_attempt(3))
    def run_task(self,
                 input_text: str,
                 key_words: Set[str],
                 url: str,
                 depth: int,
                 breadth: int,
                 brower: bool,
                 level: int,
                 stop_event: Event):  # type: ignore[valid-type]
        """Crawl a single URL, score its content, and recurse into child
        links via a thread pool.

        :param input_text: the (optimized) user query
        :param key_words: query tokens with stop-words removed
        :param url: page or PDF URL to fetch
        :param depth: maximum recursion depth
        :param breadth: maximum child links followed per page
        :param brower: passed to CrawlerBrowser as ``use_requests``
        :param level: current recursion depth (0 for seeds)
        :param stop_event: shared shutdown signal
        """
        # Honor the global stop signal and the depth limit.
        if stop_event.is_set() or level > depth:
            return

        logger.info(f"[{level}/{depth}] 正在爬取: {url}")

        # --- 1. fetch page content and outgoing links ---
        text_with_meta: Dict[str, Any]
        url_list: List[str]

        if url.lower().endswith(".pdf"):
            # PDFs are downloaded and parsed directly; they yield no
            # follow-up links to crawl.
            text_with_meta = self.extract_pdf_text(url) or {}
            url_list = []
        else:
            browser = CrawlerBrowser(use_requests=brower)
            result_data = browser.process(url)

            text_with_meta = result_data.get("text_with_meta", {})
            url_list = result_data.get("url_list") or []

        if not text_with_meta:
            return
        if not text_with_meta.get("raw_text"):
            logger.warning(f"URL: {url} 未能提取到有效内容。")
            return

        # --- 2. relevance scoring and enqueue ---
        score_content = self.evaluate_relevance(
            input_text, key_words, text_with_meta)

        try:
            # evaluate_relevance returns the score as a string.
            score = float(score_content)

            if score > self.min_score:
                self.queue.append(text_with_meta)
                logger.info(
                    f"内容入队成功，score={score}, title={text_with_meta.get('title')}, url={url}")
            else:
                logger.warning(
                    f"内容未达到阈值，score={score}, title={text_with_meta.get('title')}, url={url}")

        except (ValueError, TypeError) as e:
            logger.warning(f"相关性计算结果异常（非数字）：{score_content}。 异常: {e}")
            return  # non-numeric score: do not crawl deeper from this page

        # --- 3. recurse into child links ---

        # Stop if shutdown was requested or the depth budget is exhausted.
        if stop_event.is_set() or level >= depth:
            return

        urls_to_crawl: List[str] = []
        for deep_url in url_list:
            if len(urls_to_crawl) >= breadth:  # breadth limit reached
                break
            # Claim-before-crawl de-duplication via the shared map.
            if deep_url not in self.url_set:
                self.url_set[deep_url] = True
                urls_to_crawl.append(deep_url)

        if not urls_to_crawl:
            return

        logger.info(
            f"为 {url} 的 {len(urls_to_crawl)} 个子链接启动线程并行爬取 (level {level+1})。")

        def crawl_deep_url(deep_url):
            # Thread worker: recurse one level deeper; never let an
            # exception kill the pool.
            try:
                self.run_task(input_text, key_words, deep_url,
                              depth, breadth, brower, level + 1, stop_event)
            except Exception as e:
                logger.warning(f"子链接线程爬取异常: {deep_url}, Error: {e}")

        with ThreadPool(min(self.thead_pool_max, breadth)) as pool:
            pool.map(crawl_deep_url, urls_to_crawl)

    # ------------------------------------------------------------

    @staticmethod
    def evaluate_relevance(input_text: str, key_words: Set[str], html_result: Dict[str, Any]) -> str:
        """Score page relevance with the RelevanceScore model.

        :returns: the score as a string; "0" when the page has no raw text.
        """
        if not html_result or not html_result.get("raw_text"):
            return "0"

        # Sort before joining: set iteration order is nondeterministic, and
        # a stable keyword order keeps the model prompt reproducible.
        keywords_str = ",".join(sorted(key_words))

        agent = RelevanceScore()
        result = agent.run({
            "user_query": input_text,
            "keywords": keywords_str,
            "document": {
                "title": html_result.get("title", ""),
                "body": html_result.get("raw_text", "")
            }
        })
        # The model returns the score as a string in data.content.
        content = result.get("data", {}).get("content", "0")
        return content

    @staticmethod
    def extract_pdf_text(url: str, timeout: int = 60) -> Dict[str, Any] | None:
        """
        Download the PDF at *url*, extract its metadata and text as Markdown,
        and delete the temporary file afterwards.

        Note: this method supports **PDF files only**.

        :param url: direct URL of the PDF
        :param timeout: HTTP request timeout in seconds
        :returns: metadata dict enriched with ``raw_text``/``source``/
            ``title``/``file_name``, or None on any failure.
        """
        import pymupdf4llm  # imported lazily to avoid loading it when unused

        logger.info(f"正在下载文件: {url} ")
        tmp_path = None
        doc = None
        try:
            # --- 1. stream the PDF into a temporary file ---
            with requests.get(url, stream=True, timeout=timeout) as resp:
                resp.raise_for_status()
                with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
                    for chunk in resp.iter_content(chunk_size=8192):
                        if chunk:
                            tmp_file.write(chunk)
                    tmp_path = tmp_file.name

            # --- 2. extract metadata and text ---
            doc = fitz.open(tmp_path)
            metadata = doc.metadata or {}

            # Convert the PDF to Markdown text.
            text = pymupdf4llm.to_markdown(tmp_path, table_strategy="lines")

            # Clean up table artifacts and collapse excessive blank lines.
            text_clean = text.replace("<br>", " ").replace("|", " ")
            text_clean = re.sub(r'\n{3,}', '\n', text_clean)

            file_name = url.split('/')[-1]

            # Enrich the metadata dict for downstream consumers. PDF
            # metadata frequently carries an *empty* (or None) title, so use
            # `or` rather than a .get() default to fall back to the filename.
            metadata['raw_text'] = text_clean
            metadata['source'] = url
            metadata['title'] = metadata.get('title') or file_name
            metadata['file_name'] = file_name

            return metadata

        except Exception as exc:
            logger.warning(f"文件处理或下载失败，URL: {url}, Error: {exc}")
            return None

        finally:
            # --- 3. release the document and remove the temp file ---
            try:
                if doc is not None:
                    doc.close()
            except Exception:
                pass

            if tmp_path:
                try:
                    os.remove(tmp_path)
                except Exception as remove_exc:
                    logger.warning(f"删除临时文件失败 {tmp_path}: {remove_exc}")

    # ------------------------------------------------------------

    def get_queue(self) -> List[Dict[str, Any]]:
        """Return a plain-list snapshot of the shared result queue."""
        return list(self.queue)
