from transformers import AutoModelForCausalLM, AutoProcessor
from PIL import Image
import torch


def load_multimodal_model(model_name="Qwen/Qwen2-VL-7B", device="cuda"):
    """Load a multimodal LLM and its matching processor.

    Args:
        model_name: Hugging Face hub id or local path of the model.
        device: target device string ("cuda", "cuda:1", "cpu", ...).

    Returns:
        (processor, model) tuple; the model is placed in eval mode.
    """
    processor = AutoProcessor.from_pretrained(model_name)
    # Fix: use bf16 on ANY CUDA device. The original compared against the
    # exact string "cuda", silently falling back to fp32 for "cuda:0" etc.
    use_cuda = device.startswith("cuda")
    # NOTE(review): Qwen2-VL checkpoints are usually loaded through a
    # vision-to-seq class (e.g. Qwen2VLForConditionalGeneration); confirm
    # AutoModelForCausalLM resolves correctly for this checkpoint.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16 if use_cuda else torch.float32,
        device_map=device,
        low_cpu_mem_usage=True
    )
    model.eval()
    return processor, model


def generate_answer(processor, model, query, top_images, device="cuda"):
    """Answer a query grounded on retrieved Top-K page images.

    Args:
        processor: HF processor paired with the model (chat template +
            image preprocessing).
        model: loaded multimodal generation model.
        query: user query text.
        top_images: retrieved Top-K page images (list of PIL.Image); may
            be empty, in which case a text-only prompt is sent.
        device: device the encoded inputs are moved to.

    Returns:
        The generated answer string (new tokens only, stripped).
    """
    # Build chat input: the query text plus one image placeholder per page.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": query},
                *[{"type": "image"} for _ in top_images]
            ]
        }
    ]

    # Render the chat template, then tokenize text + images together.
    text = processor.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    inputs = processor(
        text=text,
        images=top_images if top_images else None,
        return_tensors="pt"
    ).to(device)

    # Greedy decoding for determinism. Fix: do NOT pass temperature=0.0
    # alongside do_sample=False — temperature is a sampling-only knob and
    # recent transformers versions warn or reject temperature <= 0.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            do_sample=False
        )

    # Fix: decode only the newly generated tokens. This is more robust than
    # splitting the full decoded text on "assistant\n", which breaks if the
    # chat template uses different role markers.
    prompt_len = inputs["input_ids"].shape[1]
    generated = outputs[:, prompt_len:]
    answer = processor.batch_decode(generated, skip_special_tokens=True)[0]
    return answer.strip()

if __name__ == "__main__":
    # Standard library.
    import argparse
    import gc
    import os
    import pickle
    import tempfile
    from collections import defaultdict
    from pathlib import Path

    # Third-party / local.
    # Fix: dropped imports that are never used in this script (time,
    # threading, pdf2image.convert_from_path) and duplicates of the
    # file-level imports (torch, PIL.Image).
    from datasets import load_dataset
    from ollama import Client
    from tqdm import tqdm

    from ColPaliRetriever import ColPaliRetriever

    # CLI arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("--colpali_path", default="./models/colqwen2-v1.0-merged", help="ColPali模型路径")
    parser.add_argument("--ollama_model", default="qwen2.5vl:7b", help="Ollama多模态模型名称")
    parser.add_argument("--parquet_dir", default="./data/DocVQA", help="DocVQA parquet目录")
    parser.add_argument("--split", default="validation", help="数据集split")
    parser.add_argument("--top_k", type=int, default=3, help="检索返回的Top-K图片")
    parser.add_argument("--n", type=int, default=0, help="起始索引")
    parser.add_argument("--m", type=int, default=100, help="结束索引")
    parser.add_argument("--num_threads", type=int, default=12, help="多线程数量")
    args = parser.parse_args()

    # 1. Build (or load from cache) the document -> images mapping,
    #    grouped by ucsf_document_id, plus the question -> document map.
    print("加载数据集并构建文档-图片映射...")

    # Cache locations.
    cache_dir = Path("./cache")
    cache_dir.mkdir(parents=True, exist_ok=True)
    doc_images_cache = cache_dir / "document_images.pkl"  # doc_id -> [image, ...]
    question_map_cache = cache_dir / "question_doc_map.pkl"  # question_id -> doc_id

    cache_ready = doc_images_cache.exists() and question_map_cache.exists()
    if cache_ready:
        print(f"发现缓存文件，加载缓存...")
        with open(doc_images_cache, "rb") as f:
            document_images = pickle.load(f)
        with open(question_map_cache, "rb") as f:
            question_doc_map = pickle.load(f)
        print(f"缓存加载完成，共{len(document_images)}个文档，{len(question_doc_map)}个问题映射")
    else:
        # No cache yet: preprocess from the parquet files.
        ds = load_dataset(
            "parquet",
            data_files={args.split: os.path.join(args.parquet_dir, f"{args.split}-*.parquet")},
            split=args.split
        )

        # Temporary shape: doc_id -> {page_no: image}; questions map 1:1
        # to their first observed document.
        document_images = defaultdict(dict)
        question_doc_map = {}

        for item in tqdm(ds, desc="构建文档-图片映射"):
            doc_id = item["ucsf_document_id"]
            question_id = str(item["questionId"])

            # Record each question's document only once.
            question_doc_map.setdefault(question_id, doc_id)

            # De-duplicate by page number: first image per page wins.
            pages = document_images[doc_id]
            page_no = item["ucsf_document_page_no"]
            if page_no not in pages:
                pages[page_no] = item["image"]

        # Flatten each document's pages into a page-number-ordered list.
        for doc_id in tqdm(document_images, desc="排序文档内图片"):
            by_page = document_images[doc_id]
            document_images[doc_id] = [by_page[p] for p in sorted(by_page)]

        # Persist both maps for subsequent runs.
        with open(doc_images_cache, "wb") as f:
            pickle.dump(document_images, f)
        with open(question_map_cache, "wb") as f:
            pickle.dump(question_doc_map, f)
        print(f"缓存已保存至：{cache_dir}")

    # 2. Reload the dataset for iterating over questions. (The cache branch
    #    above never binds `ds`, so this load is required either way.)
    ds = load_dataset(
        "parquet",
        data_files={args.split: os.path.join(args.parquet_dir, f"{args.split}-*.parquet")},
        split=args.split
    )

    # 3. ColPali retriever for in-document image retrieval.
    # Fix: probe CUDA through torch (imported at file level) instead of the
    # CUDA_VISIBLE_DEVICES env var — that variable is commonly unset even
    # when a GPU is available, which silently forced CPU retrieval.
    retriever = ColPaliRetriever(
        model_path=args.colpali_path,
        device="cuda" if torch.cuda.is_available() else "cpu",
        assert_pooling=False,
        num_threads=args.num_threads,
    )

    # 4. Ollama client for answer generation.
    client = Client(host="http://localhost:11434")
    print(f"使用Ollama模型: {args.ollama_model}")

    # 5. Iterate over questions, run retrieval-augmented generation, score.
    correct = 0
    total = 0
    print(f"\n开始处理问题（{args.n}到{args.m}）...")

    for i in range(args.n, args.m):
        item = ds[i]
        question = item["question"]
        gt_answers = item["answers"]
        question_id = str(item["questionId"])
        total += 1

        # Map the question to its document; skip unmapped questions.
        if question_id not in question_doc_map:
            print(f"Q{i}：问题{question_id}无关联文档，跳过")
            continue
        doc_id = question_doc_map[question_id]

        # All (de-duplicated, page-sorted) images of that document.
        if doc_id not in document_images:
            print(f"Q{i}：文档{doc_id}不存在图片，跳过")
            continue
        doc_image_list = document_images[doc_id]
        doc_image_count = len(doc_image_list)
        print(f"Q{i}：文档{doc_id}包含{doc_image_count}张图片，开始检索...")

        # Fix: bind BEFORE the try block. The original assigned this inside
        # `try`, so any exception raised earlier (encoding, retrieval) made
        # the `finally` clause itself crash with a NameError.
        temp_image_paths = []

        try:
            # Stable per-image ids (doc_id_page_{index}) for the embedding cache.
            image_ids = [f"{doc_id}_page_{idx}" for idx in range(doc_image_count)]

            # 1. Encode the question text.
            question_emb = retriever.encode_text(question)

            # 2. Encode every page image of the document (cached by id).
            image_embs = retriever.encode_images(
                images=doc_image_list,
                image_ids=image_ids,
                batch_size=4  # tune to the device
            )

            # 3. Question/image similarity scores.
            similarities = retriever.compute_similarity(question_emb, image_embs)

            # 4. Top-K most relevant pages, clamped to the page count.
            top_k = min(args.top_k, doc_image_count)
            top_k_indices = similarities.argsort(descending=True)[:top_k]
            top_k_images = [doc_image_list[idx] for idx in top_k_indices]
            top_k_scores = [similarities[idx].item() for idx in top_k_indices]
            print(f"Q{i}：检索到Top-{top_k}相关图片，相似度：{top_k_scores}")

            # 5. Spill Top-K images to temp files so Ollama can read them.
            for img in top_k_images:
                with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
                    img.save(tmp, format="PNG")
                    temp_image_paths.append(tmp.name)

            # 6. Ask the Ollama multimodal model.
            response = client.chat(
                model=args.ollama_model,
                messages=[
                    {
                        "role": "user",
                        "content": f"基于提供的图片回答问题：{question}",
                        "images": temp_image_paths
                    }
                ],
                stream=False
            )
            pred = response["message"]["content"].strip()

            # 7. Hit if any reference answer appears in the prediction
            #    (case-insensitive substring match).
            hit = any(ans.lower() in pred.lower() for ans in gt_answers)
            if hit:
                correct += 1

            # Per-question report.
            print(f"Q{i}：问题：{question}")
            print(f"预测答案：{pred}")
            print(f"参考答案：{gt_answers}")
            print(f"检索有效：{hit}")
            print("="*50)

        except Exception as e:
            print(f"Q{i}处理失败：{str(e)}")
            continue

        finally:
            # Remove temp files whether or not generation succeeded.
            for path in temp_image_paths:
                if os.path.exists(path):
                    os.unlink(path)
            gc.collect()

    # Final summary. Fix: guard the accuracy division — the original raised
    # ZeroDivisionError when the requested range was empty (args.n >= args.m).
    print(f"\n===== 结果总结 =====")
    print(f"总问题数：{total}")
    print(f"正确数：{correct}")
    if total:
        print(f"准确率：{correct/total:.2%}")
    else:
        print("准确率：N/A（未处理任何问题）")