import argparse
import io
import json
import os

import torch
from datasets import load_dataset
from PIL import Image
from tqdm import tqdm
from transformers import (
    AutoProcessor,
    AutoTokenizer,
    Qwen2_5_VLForConditionalGeneration,
)

# Path to the fine-tuned Qwen2.5-VL checkpoint being evaluated.
MODEL_PATH = "./output/tari-product-image"

# Load model and processor once at import time (module-level side effect).
# device_map="auto" shards the model across whatever devices are available.
print("正在加载模型...")
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_PATH, device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_PATH)
print("模型加载完成")


def load_test_data_from_hf(dataset_repo):
    """Download and return the "test" split of *dataset_repo* from the HF Hub."""
    print(f"正在从 {dataset_repo} 加载测试数据集...")
    test_split = load_dataset(dataset_repo, split="test")
    print(f"测试数据集加载完成，共 {len(test_split)} 个样本")
    return test_split


# Run a single (question, optional image) pair through the model.
def _build_conversation(question, image):
    """Build the single-turn chat structure expected by the processor.

    When an image is attached, any ``<image>`` placeholder embedded in the
    question text is removed (the image is passed as its own content item,
    so leaving the placeholder would duplicate it in the prompt).
    """
    if image is None:
        content = [{"type": "text", "text": question}]
    else:
        # Strip "<image>\n" first (the common dataset form), then any bare
        # "<image>" token that has no trailing newline.
        text = question.replace("<image>\n", "").replace("<image>", "")
        content = [
            {"type": "image", "image": image},
            {"type": "text", "text": text},
        ]
    return [{"role": "user", "content": content}]


def evaluate_sample(question, image):
    """Generate the model's answer to *question*, optionally grounded on *image*.

    ``image`` may be ``None`` (text-only prompt), a PIL image, or a raw
    HF-datasets image dict carrying ``"bytes"`` and/or ``"path"`` keys.
    Returns the generated reply with special tokens removed.
    """
    # Decode a raw datasets image dict into a PIL Image. Note "bytes" can be
    # present but None when the dataset stores only a file path, so check
    # truthiness and fall back to the path instead of crashing on
    # Image.open(io.BytesIO(None)).
    if isinstance(image, dict):
        if image.get("bytes"):
            image = Image.open(io.BytesIO(image["bytes"]))
        elif image.get("path"):
            image = Image.open(image["path"])

    inputs = processor.apply_chat_template(
        _build_conversation(question, image),
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    # Evaluation only — no gradients needed.
    with torch.inference_mode():
        output_ids = model.generate(**inputs, max_new_tokens=128)

    # Drop the echoed prompt tokens, keeping only the newly generated tail.
    # Single-sample batch, so one shared prompt length is sufficient.
    prompt_len = inputs.input_ids.shape[1]
    generated_ids = [seq[prompt_len:] for seq in output_ids]
    output_text = processor.batch_decode(
        generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
    )[0]

    return output_text


# Exact-match precision / recall / F1 over paired predictions and references.
def calculate_metrics(predictions, ground_truths):
    """Compute exact-match precision, recall and F1.

    A prediction counts as correct when it equals its paired ground truth
    after stripping surrounding whitespace; pairs beyond the shorter list
    are ignored for matching but both full lengths are used as denominators.
    Returns ``(0, 0, 0)`` when either list is empty.
    """
    if not predictions or not ground_truths:
        return 0, 0, 0

    hits = sum(
        1 for pred, gold in zip(predictions, ground_truths)
        if pred.strip() == gold.strip()
    )

    n_pred = len(predictions)
    n_gold = len(ground_truths)

    precision = hits / n_pred if n_pred else 0
    recall = hits / n_gold if n_gold else 0
    denom = precision + recall
    f1 = 2 * precision * recall / denom if denom else 0

    return precision, recall, f1


def main(dataset_repo):
    """Evaluate the model on the test split of *dataset_repo*.

    Prints a running sample every five items, reports aggregate
    exact-match metrics, and writes per-sample results plus the metrics
    to ``test_results.json``.
    """
    test_data = load_test_data_from_hf(dataset_repo)

    predictions, ground_truths = [], []

    print(f"开始评估测试集，共 {len(test_data)} 个样本")

    for idx, sample in enumerate(tqdm(test_data)):
        # Each sample is a two-turn conversation: user question, reference answer.
        turns = sample["conversations"]
        question = turns[0]["value"]
        reference = turns[1]["value"]

        answer = evaluate_sample(question, sample["image"])

        predictions.append(answer)
        ground_truths.append(reference)

        # Echo every fifth sample so progress can be sanity-checked live.
        if idx % 5 == 0:
            print(f"样本 {len(predictions)}:")
            print(f"问题: {question}")
            print(f"预测: {answer}")
            print(f"正确答案: {reference}")
            print("-" * 50)

    precision, recall, f1 = calculate_metrics(predictions, ground_truths)
    correct_count = sum(
        p.strip() == g.strip() for p, g in zip(predictions, ground_truths)
    )

    print(f"测试集准确率(精确率): {precision:.4f} ({correct_count}/{len(predictions)})")
    print(f"测试集召回率: {recall:.4f} ({correct_count}/{len(ground_truths)})")
    print(f"测试集F1值: {f1:.4f}")

    # Per-sample records for offline inspection.
    results = [
        {
            "index": i,
            "prediction": pred,
            "ground_truth": gt,
            "correct": pred.strip() == gt.strip(),
        }
        for i, (pred, gt) in enumerate(zip(predictions, ground_truths))
    ]

    # Aggregate metrics bundled alongside the per-sample records.
    metrics = {
        "precision": precision,
        "recall": recall,
        "f1": f1,
        "correct_count": correct_count,
        "total_samples": len(predictions),
    }

    with open("test_results.json", "w", encoding="utf-8") as f:
        json.dump(
            {"results": results, "metrics": metrics}, f, ensure_ascii=False, indent=2
        )

    print("预测结果已保存到 test_results.json")


if __name__ == "__main__":
    # CLI entry point: only one option, the HF dataset repository to evaluate on.
    cli = argparse.ArgumentParser(description="使用HuggingFace数据集评估模型")
    cli.add_argument(
        "--dataset_repo",
        type=str,
        default="BrightXiaoHan/tari-product-image",
        help="HuggingFace数据集的仓库ID，格式为'username/dataset-name'",
    )
    main(cli.parse_args().dataset_repo)