#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import json
import numpy as np
from datetime import datetime
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from transformers import AutoModel, AutoTokenizer, AutoProcessor
import torch
from PIL import Image

class ValidDatasetLoader:
    """Loader for the Valid dataset.

    Builds a flexible image-id -> absolute-path index and loads samples with
    strict routing: files whose name contains "caption" feed the caption task,
    files whose name contains "mcq" feed the multiple-choice VQA task.
    """

    def __init__(self, data_root):
        # Root directory containing the JSON files and an "images" subdirectory.
        self.data_root = data_root
        self._build_image_path_mapping()

    def _build_image_path_mapping(self):
        """Walk <data_root>/images and index every image under several keys.

        Each file is indexed by bare filename, by path relative to the images
        root, and by "<parent_dir>/<filename>", so ids from the JSON files can
        be resolved regardless of how they were written.
        """
        self.image_path_map = {}
        image_base_path = os.path.join(self.data_root, "images")
        if os.path.exists(image_base_path):
            print(f"🔍 扫描图片目录: {image_base_path}")
            for root, _dirs, files in os.walk(image_base_path):
                for file in files:
                    if not file.lower().endswith(('.jpg', '.jpeg', '.png', '.tif', '.tiff')):
                        continue
                    full_path = os.path.join(root, file)
                    self.image_path_map[file] = full_path
                    rel_path = os.path.relpath(full_path, image_base_path)
                    self.image_path_map[rel_path] = full_path
                    # Also index as "<dataset_dir>/<file>" when the image sits
                    # in a dataset-specific subdirectory.
                    dataset_dir = os.path.basename(os.path.dirname(full_path))
                    if dataset_dir != "images":
                        self.image_path_map[f"{dataset_dir}/{file}"] = full_path
        print(f"📊 构建了 {len(self.image_path_map)} 个图片路径映射")

    def _get_image_path(self, image_id):
        """Resolve an image id from the JSON files to an absolute path.

        Tries exact lookups first (raw id, bare filename, separator-normalized
        variants), then falls back to matching on the exact basename.
        Returns None when nothing matches.
        """
        if image_id in self.image_path_map:
            return self.image_path_map[image_id]
        variants = [
            image_id,
            os.path.basename(image_id),
            image_id.replace('/', os.sep),
            image_id.replace('\\', os.sep),
        ]
        for variant in variants:
            if variant in self.image_path_map:
                return self.image_path_map[variant]
        # Last resort: exact basename match.  (The previous substring test
        # wrongly let e.g. "1.jpg" resolve to "11.jpg".)
        image_name = os.path.basename(image_id)
        for key, path in self.image_path_map.items():
            if os.path.basename(key) == image_name:
                return path
        print(f"⚠️ 未找到图片: {image_id}")
        return None

    def _is_chinese(self, text):
        """Return True if *text* contains at least one CJK unified ideograph."""
        return any('\u4e00' <= ch <= '\u9fff' for ch in text)

    def _is_caption_file(self, filename):
        """A JSON file is treated as caption data when its name contains "caption"."""
        return "caption" in os.path.basename(filename).lower()

    def _is_mcq_file(self, filename):
        """A JSON file is treated as MCQ/VQA data when its name contains "mcq"."""
        return "mcq" in os.path.basename(filename).lower()

    def _resolve_json_path(self, json_file):
        """Return the absolute path of *json_file*, or None if it is missing.

        Relative paths are resolved against data_root; a warning is printed
        for missing files.
        """
        if os.path.isabs(json_file):
            file_path = json_file
        else:
            file_path = os.path.join(self.data_root, json_file)
        if not os.path.exists(file_path):
            print(f"⚠️ 文件不存在: {json_file}")
            return None
        return file_path

    def _localize_question(self, question, ground_truth):
        """Append a language instruction matching the ground-truth language."""
        if self._is_chinese(ground_truth):
            if "请用中文" not in question:
                question += " 请用中文回答。"
        elif "Please answer in English" not in question:
            question += " Please answer in English."
        return question

    def load_caption_data(self, json_files, max_samples=None):
        """Load caption samples from the caption JSON files in *json_files*.

        Keeps only items with Question Type == 'caption' and no answer choices,
        and only when both a ground truth and a resolvable image path exist.
        *max_samples* (if truthy) truncates the combined list.
        """
        all_samples = []
        for json_file in json_files:
            if not self._is_caption_file(json_file):
                continue
            file_path = self._resolve_json_path(json_file)
            if file_path is None:
                continue
            print(f"📝 加载Caption数据: {os.path.basename(json_file)}")
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                for item in data:
                    if not isinstance(item, dict):
                        continue
                    image_id = item.get('Image', item.get('image', ''))
                    if not image_id:
                        continue
                    ground_truth = item.get('Ground truth', '')
                    question = item.get('Text', 'Describe the image in detail.')
                    question_type = item.get('Question Type', '')
                    # Strict routing: captions only; items with choices belong
                    # to the MCQ task.
                    if question_type != 'caption' or item.get('Answer choices', []):
                        continue
                    question = self._localize_question(question, ground_truth)
                    sample = {
                        'image_id': image_id,
                        'image_path': self._get_image_path(image_id),
                        'question': question,
                        'ground_truth': ground_truth,
                        'task': item.get('Task', 'Image caption'),
                        'subtask': item.get('Subtask', ''),
                        'question_id': item.get('Question id', ''),
                        'question_type': question_type
                    }
                    if sample['ground_truth'] and sample['image_path']:
                        all_samples.append(sample)
            except Exception as e:
                print(f"❌ 加载文件失败 {json_file}: {e}")
                continue
        if max_samples:
            all_samples = all_samples[:max_samples]
        print(f"✅ 总共加载 {len(all_samples)} 个Caption样本")
        return all_samples

    def load_vqa_data(self, json_files, max_samples=None):
        """Load multiple-choice VQA samples from the MCQ JSON files.

        Keeps only items that have answer choices, a ground truth and a
        resolvable primary image; a second image ('Image2') is optional.
        *max_samples* (if truthy) truncates the combined list.
        """
        all_samples = []
        for json_file in json_files:
            if not self._is_mcq_file(json_file):
                continue
            file_path = self._resolve_json_path(json_file)
            if file_path is None:
                continue
            print(f"❓ 加载VQA数据: {os.path.basename(json_file)}")
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                for item in data:
                    if not isinstance(item, dict):
                        continue
                    image1 = item.get('Image1', item.get('Image', ''))
                    image2 = item.get('Image2', '')
                    if not image1:
                        continue
                    ground_truth = item.get('Ground truth', '')
                    question = item.get('Text', '')
                    question_type = item.get('Question Type', '')
                    answer_choices = item.get('Answer choices', [])
                    # Strict routing: only multiple-choice items are VQA.
                    if not answer_choices:
                        continue
                    question = self._localize_question(question, ground_truth)
                    sample = {
                        'image_id': image1,
                        'image_path': self._get_image_path(image1),
                        'image2_id': image2,
                        'image2_path': self._get_image_path(image2) if image2 else None,
                        'question': question,
                        'answer_choices': answer_choices,
                        'ground_truth': ground_truth,
                        'task': item.get('Task', 'VQA'),
                        'subtask': item.get('Subtask', ''),
                        'question_id': item.get('Question id', ''),
                        'question_type': question_type
                    }
                    if sample['ground_truth'] and sample['image_path']:
                        all_samples.append(sample)
            except Exception as e:
                print(f"❌ 加载文件失败 {json_file}: {e}")
                continue
        if max_samples:
            all_samples = all_samples[:max_samples]
        print(f"✅ 总共加载 {len(all_samples)} 个VQA样本")
        return all_samples

class ValidDatasetEvaluator:
    """Evaluator for the Valid dataset.

    Runs a multimodal chat model over caption and multiple-choice VQA samples
    and scores them: BLEU (scaled to 25 points) for captions, accuracy
    (scaled to 75 points) for VQA.
    """

    def __init__(self, model, tokenizer, processor):
        # model: a transformers model exposing a `.chat(...)` API
        # (MiniCPM-V style interface — image list in the message content).
        self.model = model
        self.tokenizer = tokenizer
        self.processor = processor

    def evaluate_caption(self, data):
        """Evaluate image-caption samples.

        Returns (final_score, avg_bleu, detailed_results); final_score is
        avg_bleu scaled to the 25-point caption budget.  A failed sample
        contributes a BLEU of 0.0 so the average reflects it.
        """
        bleu_scores = []
        detailed_results = []
        print("📝 开始图像描述任务评估...")
        print(f"📊 样本数量: {len(data)}")
        for idx, item in enumerate(data):
            print(f"  [{idx+1}/{len(data)}] 处理图片: {item['image_id']}")
            try:
                image = self._load_image(item['image_path'])
                if image is None:
                    raise FileNotFoundError(f"图片不存在: {item['image_path']}")
                question = item['question']
                response = self._model_inference(image, question, max_tokens=256)
                print(f"    🤖 模型输出: {response[:80]}...")
                print(f"    🎯 标准答案: {item['ground_truth'][:80]}...")
                bleu_score = self._calculate_bleu(item['ground_truth'], response)
                bleu_scores.append(bleu_score)
                detailed_results.append({
                    'image_id': item['image_id'],
                    'question': question,
                    'ground_truth': item['ground_truth'],
                    'prediction': response,
                    'bleu_score': bleu_score,
                    'task': item.get('task', ''),
                    'subtask': item.get('subtask', '')
                })
                print(f"    📊 BLEU分数: {bleu_score:.4f}")
            except Exception as e:
                print(f"    ❌ 处理失败: {e}")
                bleu_scores.append(0.0)
                detailed_results.append({
                    'image_id': item['image_id'],
                    'error': str(e),
                    'bleu_score': 0.0
                })
        avg_bleu = np.mean(bleu_scores) if bleu_scores else 0
        final_score = avg_bleu * 25  # caption task is worth 25 of 100 points
        print(f"✅ Caption评估完成")
        print(f"📊 平均BLEU: {avg_bleu:.4f}")
        print(f"🏆 Caption得分: {final_score:.2f}/25")
        return final_score, avg_bleu, detailed_results

    def evaluate_vqa(self, data):
        """Evaluate multiple-choice VQA samples (single- or dual-image).

        Returns (final_score, accuracy, detailed_results, subtask_stats);
        final_score is accuracy scaled to the 75-point VQA budget.  Samples
        that raise count as incorrect (and are excluded from subtask totals).
        """
        correct = 0
        total = len(data)
        detailed_results = []
        subtask_stats = {}
        print("❓ 开始VQA任务评估...")
        print(f"📊 样本数量: {total}")
        for idx, item in enumerate(data):
            print(f"  [{idx+1}/{total}] 处理图片: {item['image_id']}")
            print(f"    📋 任务: {item.get('subtask', 'Unknown')}")
            try:
                image = self._load_image(item['image_path'])
                if image is None:
                    raise FileNotFoundError(f"图片不存在: {item['image_path']}")
                if item.get('image2_path'):
                    image2 = self._load_image(item['image2_path'])
                    if image2 is None:
                        # Degrade gracefully to single-image mode when the
                        # second image cannot be loaded.
                        print(f"    ⚠️ 第二张图片加载失败，使用单图像模式")
                        question = self._build_mcq_question(item['question'], item['answer_choices'])
                        response = self._model_inference(image, question, max_tokens=64)
                    else:
                        question = self._build_dual_image_question(item['question'], item['answer_choices'])
                        response = self._model_inference_dual_image(image, image2, question, max_tokens=64)
                else:
                    question = self._build_mcq_question(item['question'], item['answer_choices'])
                    response = self._model_inference(image, question, max_tokens=64)
                print(f"    🤖 模型输出: {response}")
                print(f"    🎯 标准答案: {item['ground_truth']}")
                is_correct = self._match_mcq_answer(item['ground_truth'], response, item['answer_choices'])
                if is_correct:
                    correct += 1
                    print(f"    ✅ 正确")
                else:
                    print(f"    ❌ 错误")
                subtask = item.get('subtask', 'Unknown')
                if subtask not in subtask_stats:
                    subtask_stats[subtask] = {'correct': 0, 'total': 0}
                subtask_stats[subtask]['total'] += 1
                if is_correct:
                    subtask_stats[subtask]['correct'] += 1
                detailed_results.append({
                    'image_id': item['image_id'],
                    'question': item['question'],
                    'answer_choices': item['answer_choices'],
                    'ground_truth': item['ground_truth'],
                    'prediction': response,
                    'is_correct': is_correct,
                    'task': item.get('task', ''),
                    'subtask': subtask
                })
            except Exception as e:
                print(f"    ❌ 处理失败: {e}")
                detailed_results.append({
                    'image_id': item['image_id'],
                    'error': str(e),
                    'is_correct': False
                })
        accuracy = correct / total if total > 0 else 0
        final_score = accuracy * 75  # VQA task is worth 75 of 100 points
        print(f"✅ VQA评估完成")
        print(f"📊 总体准确率: {accuracy:.4f} ({correct}/{total})")
        print(f"🏆 VQA得分: {final_score:.2f}/75")
        print("\n📋 子任务详细结果:")
        for subtask, stats in subtask_stats.items():
            sub_acc = stats['correct'] / stats['total'] if stats['total'] > 0 else 0
            print(f"  {subtask}: {sub_acc:.4f} ({stats['correct']}/{stats['total']})")
        return final_score, accuracy, detailed_results, subtask_stats

    def _load_image(self, image_path):
        """Open *image_path* as an RGB PIL image; return None on any failure."""
        if not image_path or not os.path.exists(image_path):
            print(f"⚠️ 图片文件不存在: {image_path}")
            return None
        try:
            return Image.open(image_path).convert('RGB')
        except Exception as e:
            print(f"⚠️ 图片加载失败: {e}")
            return None

    def _model_inference(self, image, question, max_tokens=256):
        """Run single-image greedy inference and return the stripped response."""
        msgs = [{'role': 'user', 'content': [image, question]}]
        with torch.no_grad():
            response = self.model.chat(
                image=None,
                msgs=msgs,
                tokenizer=self.tokenizer,
                sampling=False,  # deterministic decoding for reproducible scores
                max_new_tokens=max_tokens
            )
        return response.strip()

    def _model_inference_dual_image(self, image1, image2, question, max_tokens=64):
        """Run two-image inference; fall back to single-image mode on failure."""
        try:
            msgs = [{'role': 'user', 'content': [image1, image2, question]}]
            with torch.no_grad():
                response = self.model.chat(
                    image=None,
                    msgs=msgs,
                    tokenizer=self.tokenizer,
                    sampling=False,
                    max_new_tokens=max_tokens
                )
            return response.strip()
        except Exception as e:
            print(f"    ⚠️ 双图像处理失败，尝试单图像: {e}")
            return self._model_inference(image1, question, max_tokens)

    def _build_mcq_question(self, question, choices):
        """Format a question with its options and an "Answer:" prompt."""
        if not choices:
            return question
        options_text = "\n".join(choices)
        return f"{question}\n\nOptions:\n{options_text}\n\nAnswer:"

    def _build_dual_image_question(self, question, choices):
        """Prefix an MCQ prompt with a two-image comparison instruction."""
        base_question = "Please compare these two images and answer the following question.\n\n"
        return base_question + self._build_mcq_question(question, choices)

    def _calculate_bleu(self, ground_truth, prediction):
        """Average smoothed BLEU-1/2/4 over lowercase whitespace tokens.

        Returns 0.0 on empty input or scoring failure.
        """
        try:
            ref = ground_truth.strip().lower().split()
            cand = prediction.strip().lower().split()
            if not ref or not cand:
                return 0.0
            smoothing = SmoothingFunction().method4
            bleu_1 = sentence_bleu([ref], cand, weights=(1, 0, 0, 0), smoothing_function=smoothing)
            bleu_2 = sentence_bleu([ref], cand, weights=(0.5, 0.5, 0, 0), smoothing_function=smoothing)
            bleu_4 = sentence_bleu([ref], cand, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=smoothing)
            return (bleu_1 + bleu_2 + bleu_4) / 3
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            return 0.0

    def _match_mcq_answer(self, ground_truth, prediction, choices):
        """Decide whether *prediction* answers the MCQ *ground_truth*.

        Multi-letter ground truths (e.g. "ABD") are compared as sets of
        option letters; single answers also accept the choice's text content
        instead of the letter.
        """
        import re  # hoisted: used by both branches below
        gt = ground_truth.strip().upper()
        pred = prediction.strip().upper()
        if len(gt) > 1 and all(c in 'ABCD' for c in gt):
            # Multi-answer question: compare option-letter sets.
            gt_options = set(gt)
            pred_letters = set(re.findall(r'\b[A-D]\b', pred))
            if pred_letters == gt_options:
                return True
            if gt_options.issubset(pred_letters):
                return True
            if all(f"({option})" in pred for option in gt_options):
                return True
            # "All of the above" style: every letter mentioned somewhere.
            if len(gt_options) == 4 and all(option in pred for option in ['A', 'B', 'C', 'D']):
                return True
            return False
        else:
            if gt in pred:
                return True
            pred_letters = re.findall(r'\b[A-D]\b', pred)
            if pred_letters and gt in pred_letters:
                return True
            if choices:
                # Accept the textual content of the correct choice, with any
                # leading "(X)" marker stripped.
                for i, choice in enumerate(choices):
                    choice_letter = chr(65 + i)
                    if choice_letter == gt:
                        choice_content = choice.strip()
                        if choice_content.startswith(f"({choice_letter})"):
                            choice_content = choice_content[3:].strip()
                        if choice_content.lower() in pred.lower():
                            return True
            return False

def main():
    """Entry point: load the model, load caption/MCQ data, run both
    evaluations and save a timestamped JSON report to OUTPUT_DIR."""
    # ========== Configuration ==========
    DATA_ROOT = r"F:\FM9G4B-V (1)\valid\valid"  # dataset root directory
    MODEL_PATH = r"F:\FM9G4B-V (1)\FM9G4B-V"    # model path
    OUTPUT_DIR = r"F:\FM9G4B-V (1)"             # directory for result files

    # JSON files to evaluate (paths relative to DATA_ROOT).
    # NOTE: the loader routes by filename — only names containing
    # "caption" / "mcq" are actually picked up by the two tasks.
    JSON_FILES = [
        "en_caption.json",
        "en_mcq.json",
        "subset_low/en/Land_use_classification__Regional_Land_use_classification.json"
        # add more json files here
    ]

    # Sample-count limits per task (None = no limit).
    MAX_CAPTION_SAMPLES = 1
    MAX_VQA_SAMPLES = 3
    # ===================================

    print("🚀 Valid数据集评测系统启动")
    print(f"📁 数据根目录: {DATA_ROOT}")
    print(f"🤖 模型路径: {MODEL_PATH}")
    print(f"📋 手动指定评测文件: {JSON_FILES}")

    # Load the model (custom remote code, bf16, CUDA).
    print(f"\n🔧 正在加载模型...")
    try:
        model = AutoModel.from_pretrained(
            MODEL_PATH,
            trust_remote_code=True,
            attn_implementation='sdpa',
            torch_dtype=torch.bfloat16
        ).eval().cuda()
        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
        processor = AutoProcessor.from_pretrained(MODEL_PATH, trust_remote_code=True)
        print("✅ 模型加载成功")
    except Exception as e:
        print(f"❌ 模型加载失败: {e}")
        return

    # Load data with strict caption/MCQ routing.
    print(f"\n📊 正在加载数据...")
    loader = ValidDatasetLoader(DATA_ROOT)
    caption_data = loader.load_caption_data(JSON_FILES, max_samples=MAX_CAPTION_SAMPLES)
    vqa_data = loader.load_vqa_data(JSON_FILES, max_samples=MAX_VQA_SAMPLES)

    if not caption_data and not vqa_data:
        print("❌ 未加载到任何数据，请检查数据路径和配置")
        return

    evaluator = ValidDatasetEvaluator(model, tokenizer, processor)

    results = {
        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        'config': {
            'data_root': DATA_ROOT,
            'model_path': MODEL_PATH,
            'json_files': JSON_FILES,
            'max_caption_samples': MAX_CAPTION_SAMPLES,
            'max_vqa_samples': MAX_VQA_SAMPLES
        },
        'caption': {},
        'vqa': {},
        'summary': {}
    }

    total_score = 0

    # Caption evaluation (25 points).
    if caption_data:
        print(f"\n{'='*60}")
        caption_score, caption_bleu, caption_details = evaluator.evaluate_caption(caption_data)
        results['caption'] = {
            'score': caption_score,
            'bleu': caption_bleu,
            'sample_count': len(caption_data),
            'details': caption_details
        }
        total_score += caption_score
    else:
        print("\n⚠️ 跳过Caption评估：无数据")
        results['caption'] = {'score': 0, 'sample_count': 0, 'message': 'No data'}

    # VQA evaluation (75 points).
    if vqa_data:
        print(f"\n{'='*60}")
        vqa_score, vqa_acc, vqa_details, vqa_subtask_stats = evaluator.evaluate_vqa(vqa_data)
        results['vqa'] = {
            'score': vqa_score,
            'accuracy': vqa_acc,
            'sample_count': len(vqa_data),
            'subtask_stats': vqa_subtask_stats,
            'details': vqa_details
        }
        total_score += vqa_score
    else:
        print("\n⚠️ 跳过VQA评估：无数据")
        results['vqa'] = {'score': 0, 'sample_count': 0, 'message': 'No data'}

    # Summary: the total is already out of 100, so the percentage equals
    # the score (the previous `(x / 100) * 100` round-trip was redundant).
    results['summary'] = {
        'total_score': total_score,
        'max_score': 100,
        'percentage': total_score
    }

    # Print the final report.
    print(f"\n{'='*60}")
    print("🏆 最终评估结果")
    print(f"{'='*60}")
    print(f"📝 Caption得分: {results['caption'].get('score', 0):.2f}/25")
    print(f"❓ VQA得分: {results['vqa'].get('score', 0):.2f}/75")
    print(f"🎯 总得分: {total_score:.2f}/100 ({results['summary']['percentage']:.1f}%)")

    if results['vqa'].get('subtask_stats'):
        print(f"\n📋 VQA子任务详情:")
        for subtask, stats in results['vqa']['subtask_stats'].items():
            acc = stats['correct'] / stats['total'] if stats['total'] > 0 else 0
            print(f"  {subtask}: {acc:.1%} ({stats['correct']}/{stats['total']})")

    # Save detailed results (create the output directory if needed so the
    # run's results are not lost at the very last step).
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_path = os.path.join(OUTPUT_DIR, f"valid_eval_results_manual_{timestamp}.json")
    with open(log_path, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    print(f"\n💾 详细结果已保存到: {log_path}")
    print("🎉 评估完成！")

# Run the full evaluation pipeline only when executed as a script.
if __name__ == "__main__":
    main()