from gptAPI import ImageAnalyzer
import threading
from queue import Queue
from pathlib import Path
from typing import Dict


def analyze_pdf_directory_concurrent(
    analyzer: "ImageAnalyzer",
    local_base_dir_raw: str,
    base_url: str,
    prompt_text: str = "请根据这些图片描述整个 PDF 内容",
) -> Dict[str, str]:
    """Concurrently analyze a directory of PDF image folders, one thread per PDF.

    Each immediate subdirectory of ``local_base_dir_raw`` is treated as one
    PDF: its image files are converted to HTTP URLs under ``base_url`` and
    sent to the model together with ``prompt_text``.

    :param analyzer: ImageAnalyzer instance used to call the model.
    :param local_base_dir_raw: local directory holding one subfolder per PDF.
    :param base_url: HTTP base URL that mirrors ``local_base_dir_raw``.
    :param prompt_text: prompt appended after the image URLs.
    :return: ``{pdf_name: overall_description}``; if a worker fails, the value
        for that PDF is an error string instead of a description.
    """
    local_base_dir = Path(local_base_dir_raw)
    results: Dict[str, str] = {}
    result_queue: Queue = Queue()
    # Only these extensions are treated as page images; hoisted as a set so
    # the membership test is O(1) and not rebuilt per file.
    image_suffixes = {".png", ".jpg", ".jpeg", ".webp"}

    def worker(pdf_dir: Path) -> None:
        # One thread per PDF: collect image URLs, append the prompt, call the
        # analyzer, and hand the result back through the queue.
        pdf_name = pdf_dir.name
        try:
            messages_content = [
                {"image": f"{base_url}/{pdf_name}/{img_file.name}"}
                for img_file in sorted(pdf_dir.iterdir())
                if img_file.is_file() and img_file.suffix.lower() in image_suffixes
            ]
            messages_content.append({"text": prompt_text})
            response = analyzer.analyze_pdf_directory_single(
                pdf_dir, messages_content)
            result_queue.put((pdf_name, response))
        except Exception as exc:
            # Without this, the exception would die with the thread and the
            # PDF would silently be missing from the returned dict.
            result_queue.put((pdf_name, f"analysis failed: {exc}"))

    # Attach a single-PDF analysis method to the analyzer if it lacks one
    # (internal use only; does not break the original class).
    if not hasattr(analyzer, "analyze_pdf_directory_single"):
        def analyze_pdf_directory_single(self, pdf_dir, messages_content):
            import dashscope  # local import: only needed on this fallback path
            response = dashscope.MultiModalConversation.call(
                api_key=self.api_key,
                model=self.model,
                messages=[{"role": "user", "content": messages_content}],
            )
            # NOTE(review): assumes the dashscope response is dict-like with a
            # "content" key — confirm against the installed SDK version.
            return response.get("content") or str(response)
        setattr(analyzer, "analyze_pdf_directory_single",
                analyze_pdf_directory_single.__get__(analyzer))

    threads: list[threading.Thread] = []
    for pdf_dir in sorted(local_base_dir.iterdir()):
        if pdf_dir.is_dir():
            t = threading.Thread(target=worker, args=(pdf_dir,))
            t.start()
            threads.append(t)

    for t in threads:
        t.join()

    # All workers have joined, so draining the queue here is race-free.
    while not result_queue.empty():
        pdf_name, description = result_queue.get()
        results[pdf_name] = description

    return results


if __name__ == "__main__":
    import os
    from dotenv import load_dotenv

    # Load .env so IMAGE_URL and the API key can come from the environment.
    load_dotenv()
    # Prefer the environment key (DASHSCOPE_API_KEY is the dashscope SDK
    # convention — TODO confirm the variable name used by this project); the
    # literal placeholder is only a last-resort fallback.
    analyzer = ImageAnalyzer(api_key=os.getenv("DASHSCOPE_API_KEY") or "sk-xxx")

    local_dir = "./analysis/pdfs"
    # Join host and path explicitly: raw concatenation produced URLs like
    # "http://host./analysis/pdfs" (or a "/./" segment) depending on whether
    # IMAGE_URL ended with a slash.
    base_url = (os.getenv("IMAGE_URL") or "").rstrip("/") + "/" + local_dir.removeprefix("./")

    pdf_analysis_results = analyze_pdf_directory_concurrent(
        analyzer, local_dir, base_url)

    for pdf_name, description in pdf_analysis_results.items():
        print(f"{pdf_name} 的分析结果：\n{description}\n")
