import os
import torch
import gc
import ssl
import requests
from PIL import Image
from transformers import Qwen2VLProcessor, AutoModelForVision2Seq
from datasets import load_dataset

# Disable SSL certificate verification globally (works around certificate
# errors when downloading models/datasets behind intercepting proxies).
# WARNING: this weakens HTTPS security for the entire process.
ssl._create_default_https_context = ssl._create_unverified_context
requests.packages.urllib3.disable_warnings()  # silence InsecureRequestWarning spam

class Qwen2VLValidator:
    """End-to-end sanity checker for a Qwen2-VL vision-language model.

    Loads a processor + model (either a local checkpoint or the official
    Hugging Face release), pulls a small DocVQA validation slice (with a
    synthetic fallback), and reports a substring-match accuracy over the
    generated answers.
    """

    def __init__(self, device="cuda", use_local_model=False, local_model_path=None):
        """Set up the model and the evaluation dataset.

        Args:
            device: Preferred device; silently falls back to "cpu" when
                CUDA is unavailable.
            use_local_model: When True (and a path is given), load weights
                from ``local_model_path`` instead of the hub.
            local_model_path: Filesystem path of a local checkpoint.
        """
        self.device = device if torch.cuda.is_available() else "cpu"
        print(f"使用设备: {self.device}")

        # Choose between a local checkpoint and the official hub model.
        self.use_local_model = use_local_model
        self.local_model_path = local_model_path
        self.load_model()

        # Load the evaluation dataset (local parquet preferred).
        self.load_dataset()

    def load_model(self):
        """Load processor and model (local checkpoint or official hub model)."""
        print("正在加载模型...")
        if self.use_local_model and self.local_model_path:
            model_path = self.local_model_path
            print(f"加载本地模型: {model_path}")
        else:
            model_path = "Qwen/Qwen2-VL-7B-Instruct"
            print(f"加载官方模型: {model_path}")

        # NOTE(review): ``processor_config`` is not a documented
        # ``from_pretrained`` keyword and is most likely swallowed/ignored;
        # kept as-is for compatibility — verify against your transformers
        # version if the 448px resize matters.
        self.processor = Qwen2VLProcessor.from_pretrained(
            model_path,
            trust_remote_code=True,
            processor_config={
                "image_processor": {
                    "size": {"shortest_edge": 448, "longest_edge": 448}
                }
            },
            use_auth_token=False  # public model, no auth token required
        )

        # bfloat16 keeps GPU memory low; fall back to float32 on CPU.
        self.model = AutoModelForVision2Seq.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16 if self.device == "cuda" else torch.float32,
            device_map="auto",
            trust_remote_code=True,
            use_auth_token=False
        ).eval()
        print("模型加载完成")

    def load_dataset(self):
        """Load evaluation data: local parquet first, then the remote hub,
        then a single synthetic sample as a last resort."""
        print("加载测试数据集...")
        try:
            # Prefer a local DocVQA parquet dump if present (edit the path
            # to match your layout).
            local_parquet_dir = "./data/DocVQA"
            if os.path.exists(local_parquet_dir):
                print(f"使用本地数据集: {local_parquet_dir}")
                self.ds = load_dataset(
                    "parquet",
                    data_files={"validation": os.path.join(local_parquet_dir, "validation-*.parquet")},
                    split="validation[:10]"  # first 10 rows only
                )
            else:
                # Remote dataset (SSL verification disabled at module level).
                print("使用远程数据集")
                self.ds = load_dataset(
                    "lmms-lab/DocVQA",
                    "DocVQA",
                    split="validation[:10]",
                    trust_remote_code=True
                )
            print(f"数据集加载完成，共{len(self.ds)}条数据")
        except Exception as e:
            # Fall back to one synthetic sample so the pipeline still runs
            # end-to-end (blank image, canned question/answer).
            print(f"数据集加载失败，使用内置测试数据: {e}")
            self.ds = [
                {
                    "question": "What is the title of the document?",
                    "answers": ["Sample Document"],
                    "image": Image.new("RGB", (448, 448), color="white")
                }
            ]

    def check_image_validity(self, image):
        """Validate that *image* is a PIL image and normalize it to RGB.

        Args:
            image: Candidate image object.

        Returns:
            The image converted to RGB mode.

        Raises:
            ValueError: If *image* is not a PIL ``Image``.
        """
        if not isinstance(image, Image.Image):
            raise ValueError("图像不是PIL格式")
        if image.mode != "RGB":
            image = image.convert("RGB")
        print(f"图像模式: {image.mode}, 尺寸: {image.size}")
        return image

    def generate_and_validate(self):
        """Run generation over every sample and print substring-match accuracy.

        A sample counts as correct when any reference answer appears
        (case-insensitively) inside the generated answer text.
        """
        correct = 0
        total = len(self.ds)
        print(f"\n开始验证，共{total}条数据")

        for i, item in enumerate(self.ds):
            print(f"\n===== 样本 {i+1}/{total} =====")
            try:
                question = item["question"]
                gt_answers = item["answers"]
                image = item["image"] if "image" in item else Image.new("RGB", (448, 448))
            except Exception as e:
                print(f"解析数据失败: {e}，跳过")
                continue

            # Normalize/validate the image; skip the sample on failure.
            try:
                image = self.check_image_validity(image)
            except Exception as e:
                print(f"图像处理失败: {e}，跳过")
                continue

            # Build the chat-template prompt with one image placeholder.
            messages = [
                {"role": "system", "content": "Answer based on the image."},
                {"role": "user", "content": [
                    {"type": "text", "text": question},
                    {"type": "image"}
                ]}
            ]
            text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
            print(f"输入文本: {text[:150]}...")

            # Encode text + image into model inputs.
            try:
                inputs = self.processor(
                    text=text,
                    images=image,
                    return_tensors="pt"
                ).to(self.device)
                if "pixel_values" not in inputs:
                    print("警告：输入不含图像数据")
            except Exception as e:
                print(f"编码失败: {e}，跳过")
                continue

            # Greedy decoding; no grad needed for inference.
            try:
                with torch.no_grad():
                    outputs = self.model.generate(
                        **inputs,
                        max_new_tokens=128,
                        num_beams=1,
                        do_sample=False
                    )
            except Exception as e:
                print(f"生成失败: {e}，跳过")
                continue

            # Decode ONLY the newly generated tokens. Decoding the full
            # sequence would prepend the prompt to ``pred`` and could count a
            # reference answer that merely appears in the question as a hit.
            prompt_len = inputs["input_ids"].shape[1]
            pred = self.processor.batch_decode(
                outputs[:, prompt_len:], skip_special_tokens=True
            )[0].strip()
            print(f"问题: {question}")
            print(f"模型输出: {pred}")
            print(f"参考答案: {gt_answers}")

            # Case-insensitive substring match against any reference answer.
            hit = any(ans.lower() in pred.lower() for ans in gt_answers)
            if hit:
                correct += 1
                print("结果: 正确")
            else:
                print("结果: 错误")

            # Free per-sample tensors to keep GPU memory bounded.
            del inputs, outputs
            gc.collect()
            if self.device == "cuda":
                torch.cuda.empty_cache()

        print(f"\n===== 验证结束 =====")
        # Guard the empty-dataset case to avoid ZeroDivisionError.
        if total:
            print(f"准确率: {correct}/{total} = {correct/total:.2%}")
        else:
            print("准确率: 数据集为空，无法计算")

def _main():
    """Script entry point: build a validator and run the evaluation loop."""
    # Flip to False to pull the official Hugging Face model instead of a
    # local checkpoint.
    use_local_model = True
    local_model_path = "./models/Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4"

    runner = Qwen2VLValidator(
        device="cuda" if torch.cuda.is_available() else "cpu",
        use_local_model=use_local_model,
        local_model_path=local_model_path,
    )
    runner.generate_and_validate()


if __name__ == "__main__":
    _main()