from transformers import AutoModelForCausalLM, AutoProcessor
from PIL import Image
import torch


def load_multimodal_model(model_name="Qwen/Qwen2-VL-7B", device="cuda"):
    """Load a multimodal LLM together with its matching processor.

    Args:
        model_name: Hugging Face model id or local path.
        device: target device; bfloat16 is used on "cuda", float32 otherwise.

    Returns:
        (processor, model) tuple with the model already in eval mode.
    """
    # bfloat16 halves memory on GPU; fall back to full precision on CPU.
    load_kwargs = dict(
        torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
        device_map=device,
        low_cpu_mem_usage=True,
    )
    proc = AutoProcessor.from_pretrained(model_name)
    net = AutoModelForCausalLM.from_pretrained(model_name, **load_kwargs)
    net.eval()
    return proc, net


def generate_answer(processor, model, query, top_images, device="cuda"):
    """Generate an answer to *query* grounded in the retrieved page images.

    Args:
        processor: chat-template-aware multimodal processor (e.g. Qwen2-VL's).
        model: causal LM as returned by ``load_multimodal_model``.
        query: user query text.
        top_images: retrieved Top-K page images (list of PIL.Image); may be empty.
        device: device the encoded inputs are moved to; must match the model's.

    Returns:
        The generated answer string, with the prompt portion stripped.
    """
    # Build the chat input: the query text plus one image placeholder
    # per retrieved image (the processor pairs them with `images=` below).
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": query},
                *[{"type": "image"} for _ in top_images]
            ]
        }
    ]

    text = processor.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    inputs = processor(
        text=text,
        images=top_images if top_images else None,
        return_tensors="pt"
    ).to(device)

    # Greedy decoding for deterministic output.
    # FIX: do not pass temperature=0.0 alongside do_sample=False — recent
    # transformers versions warn on / reject a non-positive temperature,
    # and the value is ignored when sampling is disabled anyway.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            do_sample=False
        )

    # FIX: generate() returns prompt + completion for causal LMs; slice off
    # the prompt tokens instead of splitting the decoded text on
    # "assistant\n", which breaks if the query contains that substring or
    # the chat template changes its role markers.
    prompt_len = inputs["input_ids"].shape[1]
    answer = processor.batch_decode(
        outputs[:, prompt_len:], skip_special_tokens=True
    )[0]
    return answer.strip()


if __name__ == "__main__":
    import argparse
    import gc
    import os
    import tempfile

    from ollama import Client
    from ColPaliRetriever import ColPaliRetriever
    from datasets import load_dataset

    # Optional: route Hugging Face downloads through a mirror.
    os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

    # Parse CLI arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("--colpali_path", default="./models/colqwen2-v1.0-merged", help="ColPali模型路径")
    parser.add_argument("--ollama_model", default="qwen2.5vl:7b", help="Ollama多模态模型名称")
    parser.add_argument("--parquet_dir", default="./data/DocVQA", help="DocVQA parquet目录")
    parser.add_argument("--split", default="validation", help="数据集split")
    parser.add_argument("--top_k", type=int, default=1, help="检索返回的Top-K图片")
    parser.add_argument("--n", type=int, default=0, help="起始索引")
    parser.add_argument("--m", type=int, default=100, help="结束索引")
    parser.add_argument("--num_threads", type=int, default=12, help="多线程")
    args = parser.parse_args()

    # 1. Build the embedding cache (skip internally if already generated).
    # FIX: CUDA_VISIBLE_DEVICES is usually unset even when a GPU exists —
    # probe the runtime instead of the environment variable.
    retriever = ColPaliRetriever(
        model_path=args.colpali_path,
        device="cuda" if torch.cuda.is_available() else "cpu",
        assert_pooling=False,
        num_threads=args.num_threads,
    )
    retriever.process_parquet_dataset(
        parquet_dir=args.parquet_dir,
        split=args.split,
        n=args.n,
        m=args.m,
        id_field="questionId",
        image_field="image"
    )

    # 2. Load the DocVQA dataset from local parquet shards.
    ds = load_dataset(
        "parquet",
        data_files={args.split: os.path.join(args.parquet_dir, f"{args.split}-*.parquet")},
        split=args.split
    )

    # 3. Initialise the Ollama client (default local service address).
    client = Client(host="http://localhost:11434")
    print(f"使用Ollama模型: {args.ollama_model}")

    # 4. Iterate over questions, generate answers and score them.
    # FIX: clamp the end index so --m beyond the dataset length cannot
    # raise IndexError; failed items still count against the total.
    end = min(args.m, len(ds))
    correct = 0
    total = end - args.n

    for i in range(args.n, end):
        item = ds[i]
        question = item["question"]
        gt_answers = item["answers"]
        image = item["image"]  # PIL image object

        # Ollama needs a file path, so persist the image to a temp file.
        # delete=False + explicit unlink in `finally` keeps the file
        # readable after the handle is closed (required on Windows too).
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            image.save(tmp, format="PNG")
            temp_image_path = tmp.name

        try:
            # Optional: retrieve top-k pages via ColPali (here we use the
            # item's own page image directly; args.top_k is reserved for that).
            response = client.chat(
                model=args.ollama_model,
                messages=[
                    {
                        "role": "user",
                        "content": question,
                        "images": [temp_image_path]  # pass the temp image path
                    }
                ],
                stream=False
            )
            pred = response["message"]["content"].strip()

            # Substring match (case-insensitive) against any ground truth.
            hit = any(ans.lower() in pred.lower() for ans in gt_answers)
            if hit:
                correct += 1

            # Debug output.
            print(f"Q{i}: {question}")
            print(f"Pred: {pred}")
            print(f"GT: {gt_answers}")
            print(f"Hit: {hit}")
            print("="*30)

        except Exception as e:
            print(f"处理Q{i}失败: {str(e)}")
            continue

        finally:
            # Clean up the temp file and encourage memory reclamation.
            os.unlink(temp_image_path)
            gc.collect()

    # FIX: guard against ZeroDivisionError when the index range is empty.
    if total > 0:
        print(f"\n准确率: {correct}/{total} = {correct/total:.2%}")
    else:
        print("\n准确率: 无可评估样本 (起始索引不小于结束索引)")