import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoModelForVision2Seq, AutoProcessor


def load_multimodal_model(model_name="Qwen/Qwen2-VL-7B", device="cuda"):
    """Load a multimodal vision-language model and its processor.

    Args:
        model_name: HF hub id or local path of the model.
        device: "cuda" or "cpu"; selects both dtype (bf16 on CUDA,
            fp32 on CPU) and device placement.

    Returns:
        Tuple ``(processor, model)`` with the model in eval mode.
    """
    processor = AutoProcessor.from_pretrained(model_name)
    # Qwen2-VL is a vision-to-sequence architecture; AutoModelForCausalLM
    # cannot instantiate it. Use AutoModelForVision2Seq — consistent with
    # the __main__ driver in this file.
    model = AutoModelForVision2Seq.from_pretrained(
        model_name,
        torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
        device_map=device,
        low_cpu_mem_usage=True,
    )
    model.eval()
    return processor, model


def generate_answer(processor, model, query, top_images, device="cuda"):
    """Generate an answer to *query* grounded in retrieved page images.

    Args:
        processor: multimodal processor matching *model*.
        model: vision-language model supporting ``generate``.
        query: user query text.
        top_images: retrieved Top-K page images (list of PIL.Image).
        device: device the encoded inputs are moved to.

    Returns:
        The generated answer string, with the echoed prompt removed.
    """
    # Build chat input: the query text plus one image placeholder per
    # retrieved page (the processor matches them to `images` below).
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": query},
                *[{"type": "image"} for _ in top_images],
            ],
        }
    ]

    text = processor.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    inputs = processor(
        text=text,
        images=top_images if top_images else None,
        return_tensors="pt",
    ).to(device)

    # Greedy decoding for determinism. `temperature` is deliberately
    # omitted: it is ignored (and warned about / rejected by newer
    # transformers versions) when do_sample=False.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=512,
            do_sample=False,
        )

    # Decode only the newly generated tokens. Slicing off the prompt by
    # token count is robust, unlike splitting the decoded string on
    # "assistant\n", which breaks if the chat template changes.
    new_tokens = outputs[:, inputs["input_ids"].shape[1]:]
    answer = processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
    return answer.strip()


if __name__ == "__main__":
    import os

    # Point the HF hub at the mirror BEFORE importing libraries that may
    # read HF_ENDPOINT at import time (datasets / transformers hub client).
    os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

    import argparse
    import gc

    import torch
    from datasets import load_dataset
    from transformers import AutoModelForVision2Seq, Qwen2VLProcessor

    from ColPaliRetriever import ColPaliRetriever

    # ---- CLI arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument("--colpali_path", default="./models/colqwen2-v1.0-merged", help="ColPali模型路径")
    parser.add_argument("--vlm_model", default="./models/Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4", help="多模态生成模型")
    parser.add_argument("--parquet_dir", default="./data/DocVQA", help="DocVQA parquet目录")
    parser.add_argument("--split", default="validation", help="数据集split")
    parser.add_argument("--top_k", type=int, default=1, help="检索返回的Top-K图片")
    parser.add_argument("--n", type=int, default=0, help="起始索引")
    parser.add_argument("--m", type=int, default=100, help="结束索引")
    parser.add_argument("--num_threads", type=int, default=12, help="多线程")
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="设备")
    args = parser.parse_args()

    # 1. Build the embedding cache (safe to re-run; skip if already built).
    retriever = ColPaliRetriever(
        model_path=args.colpali_path,
        device=args.device,
        assert_pooling=False,
        num_threads=args.num_threads,
    )
    retriever.process_parquet_dataset(
        parquet_dir=args.parquet_dir,
        split=args.split,
        n=args.n,
        m=args.m,
        id_field="questionId",
        image_field="image",
    )

    # 2. Load the DocVQA dataset from local parquet shards.
    ds = load_dataset(
        "parquet",
        data_files={args.split: os.path.join(args.parquet_dir, f"{args.split}-*.parquet")},
        split=args.split,
    )

    # 3. Load the multimodal generation model.
    print(f"加载多模态模型: {args.vlm_model}")
    # NOTE(review): `processor_config` is not a documented from_pretrained
    # parameter for Qwen2VLProcessor — the intended image-size override is
    # likely ignored. Verify; Qwen2-VL resolution is normally controlled
    # via `min_pixels` / `max_pixels`.
    vlm_processor = Qwen2VLProcessor.from_pretrained(
        args.vlm_model,
        processor_config={
            "image_processor": {
                "size": {"shortest_edge": 448, "longest_edge": 448}
            }
        },
    )
    vlm_model = AutoModelForVision2Seq.from_pretrained(
        args.vlm_model,
        torch_dtype=torch.bfloat16 if args.device == "cuda" else torch.float32,
        device_map=args.device,
    ).eval()

    # 4. Iterate over questions, generate answers, and score them.
    # Clamp the range to the dataset size to avoid IndexError, and keep
    # `total` non-negative so the final accuracy never divides by zero.
    end = min(args.m, len(ds))
    total = max(end - args.n, 0)
    correct = 0

    for i in range(args.n, end):
        item = ds[i]
        question = item["question"]
        question_id = str(item["questionId"])
        gt_answers = item["answers"]
        image = item["image"]

        # Optional: retrieve top-k pages via the embedding cache; here the
        # question's own page image is used directly.
        images = [image]

        # Build the multimodal chat input: question text plus one image
        # placeholder per page.
        messages = [
            {"role": "user", "content": [
                {"type": "text", "text": question},
                *[{"type": "image"} for _ in images],
            ]}
        ]

        # Keep the full chat template intact: stripping everything before
        # "user" removes the <|im_start|> markers and corrupts the prompt
        # format the model was trained on.
        text = vlm_processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

        # Print a prefix of the rendered prompt for debugging.
        print(f"处理后的输入: {text[:100]}...")

        inputs = vlm_processor(
            text=text,
            images=images,
            return_tensors="pt",
        ).to(args.device)

        # Greedy decoding for determinism; `temperature` is omitted because
        # it is ignored (and warned about) when do_sample=False.
        with torch.no_grad():
            outputs = vlm_model.generate(
                **inputs,
                max_new_tokens=128,
                num_beams=1,
                do_sample=False,
                pad_token_id=vlm_processor.tokenizer.eos_token_id,  # ensure a pad token exists
            )

        # Decode only the newly generated tokens — robust prompt removal,
        # unlike splitting the decoded string on "assistant".
        new_tokens = outputs[:, inputs["input_ids"].shape[1]:]
        pred = vlm_processor.batch_decode(new_tokens, skip_special_tokens=True)[0].strip()

        # Score: hit if any ground-truth answer is a substring (case-insensitive).
        hit = any(ans.lower() in pred.lower() for ans in gt_answers)
        if hit:
            correct += 1
        print(f"Q{i}: {question}")
        print(f"Pred: {pred}")
        print(f"GT: {gt_answers}")
        print(f"Hit: {hit}")
        print("=" * 30)

        # Free per-sample tensors to keep GPU memory bounded.
        del inputs, outputs
        gc.collect()
        if args.device == "cuda":
            torch.cuda.empty_cache()

    if total > 0:
        print(f"\n准确率: {correct}/{total} = {correct/total:.2%}")
    else:
        print("\nNo samples evaluated (n >= m or empty dataset).")