import os
import torch
from torch.utils.data import Dataset
from PIL import Image
import json

class ValidDataset(Dataset):
    """Validation dataset that defers all preprocessing to ``model.chat()``.

    Each item is returned with the raw PIL image and a ready-to-use ``msgs``
    payload so that inference mirrors the standalone test script (no resize,
    no tokenization here).  Placeholder tensors are included only so generic
    trainer/collator code does not break; they are never consumed during
    evaluation.
    """

    # Field names that may hold the image file name, in loading priority
    # order.  Filtering and loading MUST share this order so that the file
    # whose existence was verified is the one actually opened.
    _IMAGE_KEYS = ("Image1", "Image2", "Image", "image", "image_id", "file_name")

    # Default/backup image directories, probed in order when the caller does
    # not supply a usable ``image_root``.
    _DEFAULT_IMAGE_ROOTS = (
        "/root/autodl-tmp/valid/valid/images",
        "/root/autodl-tmp/FM9G4B-V (1)/valid/valid/images",
        "/root/autodl-tmp/valid/images",
        "/root/autodl-tmp/images",
    )

    def __init__(self, data_list, image_root=None, tokenizer=None, transform=None, task_type="caption"):
        """
        Args:
            data_list: list of dict items; each should carry one of the
                image fields in ``_IMAGE_KEYS`` plus question/answer fields.
            image_root: directory containing the images.  Used when it
                exists; otherwise the known default/backup paths are probed.
            tokenizer: kept for interface compatibility (unused here).
            transform: kept for interface compatibility; intentionally NOT
                applied — images are returned raw, matching the test script.
            task_type: default task label ("caption" or "vqa").
        """
        self.data = data_list
        self.tokenizer = tokenizer
        self.task_type = task_type
        self.transform = transform  # stored but deliberately unapplied

        self.image_root = self._resolve_image_root(image_root)
        print(f"✅ Using image root: {self.image_root}")

        # Keep only items whose image file actually exists on disk.
        self.valid_data = []
        self._filter_valid_items()

    def _resolve_image_root(self, image_root):
        """Pick the image directory: caller-supplied if it exists, else the
        first default/backup directory present on disk."""
        if image_root and os.path.exists(image_root):
            return image_root
        primary = self._DEFAULT_IMAGE_ROOTS[0]
        if os.path.exists(primary):
            return primary
        for backup_path in self._DEFAULT_IMAGE_ROOTS[1:]:
            if os.path.exists(backup_path):
                print(f"✅ Using backup image root: {backup_path}")
                return backup_path
        # Nothing exists; keep the primary default so later "not found"
        # messages still show a meaningful path.
        return primary

    @classmethod
    def _get_image_name(cls, item):
        """Return the first non-empty image field of *item*, or ``None``.

        Single source of truth for field priority — used by both filtering
        and loading so they always agree on which file is meant.
        """
        for key in cls._IMAGE_KEYS:
            name = item.get(key)
            if name:
                return name
        return None

    def _filter_valid_items(self):
        """Populate ``self.valid_data`` with items whose image file exists."""
        for idx, item in enumerate(self.data):
            image_name = self._get_image_name(item)
            if not image_name:
                print(f"⚠️  Skipping item {idx}: No image field found")
                continue
            image_path = os.path.join(self.image_root, image_name)
            if os.path.exists(image_path):
                self.valid_data.append(item)
            else:
                print(f"⚠️  Skipping item {idx}: Image not found - {image_path}")
        print(f"📊 Filtered dataset: {len(self.valid_data)}/{len(self.data)} valid items")

    def __len__(self):
        return len(self.valid_data)

    def __getitem__(self, idx):
        """Return raw data for one item: PIL image, question text, ground
        truth, and a ``msgs`` list directly usable by ``model.chat()``.

        Never raises — any failure yields a safe placeholder dict with an
        ``'error'`` key so the evaluation loop is not interrupted.
        """
        item = self.valid_data[idx]
        try:
            image_name = self._get_image_name(item)
            if not image_name:
                raise ValueError("No valid image field found")

            image_path = os.path.join(self.image_root, image_name)
            # Load exactly like the test script: no resize, no transform.
            image = Image.open(image_path).convert("RGB")

            question = item.get("Text", item.get("question", "Describe this image in detail."))
            ground_truth = item.get("Ground truth", item.get("answer", ""))
            answer_choices = item.get("Answer choices", item.get("answer_choices", []))

            if answer_choices:  # multiple-choice VQA item
                choices_text = "\n".join(answer_choices)
                full_question = f"{question}\n\nOptions:\n{choices_text}\n\nAnswer:"
                task_type = "vqa"
            else:  # free-form caption item
                full_question = question
                task_type = "caption"

            return {
                'image': image,  # raw PIL.Image object
                'question': full_question,
                'ground_truth': ground_truth,
                'answer_choices': answer_choices,
                'image_id': image_name,
                'task_type': task_type,
                # Message format consumable by model.chat() as-is.
                'msgs': [{'role': 'user', 'content': [image, full_question]}],
                **self._placeholder_tensors(),
            }

        except Exception as e:
            # Best-effort fallback: log and return a safe default rather
            # than crashing the whole evaluation run on one bad item.
            print(f"❌ Error processing item {idx}: {e}")
            return {
                'image': None,
                'question': "",
                'ground_truth': "",
                'answer_choices': [],
                'image_id': "",
                'task_type': "caption",
                'msgs': [{'role': 'user', 'content': ["", "test"]}],
                'error': str(e),
                **self._placeholder_tensors(),
            }

    @staticmethod
    def _placeholder_tensors():
        """Fresh dummy tensors kept only for trainer/collator compatibility;
        evaluation via ``model.chat()`` never reads them."""
        return {
            'input_ids': torch.tensor([1], dtype=torch.long),
            'position_ids': torch.tensor([0], dtype=torch.long),
            'labels': torch.tensor([-100], dtype=torch.long),
            'attention_mask': torch.tensor([1], dtype=torch.bool),
            'pixel_values': [torch.zeros(3, 224, 224)],  # placeholder image tensor
            'tgt_sizes': torch.tensor([[16, 16]], dtype=torch.int32),
            'image_bound': torch.zeros(1, 2, dtype=torch.long),
        }

def simple_valid_collator(examples):
    """Collate a batch of ValidDataset items.

    Raw fields (images, questions, message payloads, ...) are gathered into
    plain Python lists; the compatibility tensors are stacked along a new
    leading batch dimension.  The tensor entries exist only so generic
    training code keeps working — evaluation does not read them.
    """
    def gather(key):
        # Collect one field from every example, preserving batch order.
        return [example[key] for example in examples]

    return {
        'images': gather('image'),
        'questions': gather('question'),
        'ground_truths': gather('ground_truth'),
        'answer_choices': gather('answer_choices'),
        'image_ids': gather('image_id'),
        'task_types': gather('task_type'),
        'msgs': gather('msgs'),

        # Compatibility tensors (unused at evaluation time).
        'input_ids': torch.stack(gather('input_ids')),
        'position_ids': torch.stack(gather('position_ids')),
        'labels': torch.stack(gather('labels')),
        'attention_mask': torch.stack(gather('attention_mask')),
        'pixel_values': gather('pixel_values'),
        'tgt_sizes': torch.stack(gather('tgt_sizes')),
        'image_bound': gather('image_bound'),
    }