from typing import List, Tuple, Dict, Any, Optional, Union
import datasets
from datasets import load_dataset, concatenate_datasets, Dataset
from PIL import Image
import os
import random
import logging
# Configure root logging at import time so the dataset loaders below can log.
# NOTE(review): calling basicConfig() at module import affects the whole
# application's logging configuration; presumably intended because this file
# doubles as a script (see __main__ below) — confirm for library use.
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

from torch.utils.data import Dataset
import torch
from torchvision import transforms
import json

def process_image(image: Optional["Image.Image"], resolution: str = "mid",
                  max_dim: int = 1344) -> Optional["Image.Image"]:
    """Resize a PIL image according to a named resolution preset.

    Args:
        image: Source PIL image, or None (passed through unchanged).
        resolution: One of "high" (1344x1344), "mid" (384x384),
            "small" (224x224) or "low" (128x128). Any other value (including
            None) keeps the aspect ratio and only downscales so that the
            longest side does not exceed max_dim.
        max_dim: Longest-side cap for the aspect-preserving fallback branch;
            ignored by the named presets.

    Returns:
        The resized image, or None if image is None.
    """
    if image is None:
        return None

    # Fixed square presets — aspect ratio is intentionally not preserved here.
    fixed_sizes = {
        "high": (1344, 1344),
        "mid": (384, 384),
        "small": (224, 224),
        "low": (128, 128),
    }
    target = fixed_sizes.get(resolution)
    if target is not None:
        return image.resize(target)

    # Fallback: downscale while preserving the aspect ratio.
    cur_max_dim = max(image.size)
    if cur_max_dim > max_dim:
        ratio = max_dim / cur_max_dim
        # max(1, ...) guards against a zero dimension for extreme aspect
        # ratios (e.g. a 1 x 5000 image), where int() truncation would
        # otherwise produce resize((0, h)) and fail.
        new_size = (max(1, int(image.width * ratio)),
                    max(1, int(image.height * ratio)))
        image = image.resize(new_size)

    return image


class MMEBDataset(Dataset):  # renamed from MMEBTrainDataset
    """Contrastive training dataset over one or more MMEB subsets.

    Each item exposes query / positive / (optional) negative text and images.
    Subsets are loaded via the HuggingFace ``datasets`` library and merged
    into a single dataset.
    """

    # Default subset selection used when the caller passes subsets=None.
    DEFAULT_SUBSETS = ("ImageNet_1K", "N24News", "VisDial")

    def __init__(self, data_dir: str = "data/MMEB-train",
                 subsets: Optional[List[str]] = None,
                 split: str = "original",
                 image_resolution: Optional[str] = None,
                 max_samples: Optional[int] = None):
        """
        Initialize the MMEB training dataset.

        Args:
            data_dir: Dataset root; used both as the HF dataset path and as
                the base directory for relative image paths.
            subsets: Subset names to load; None selects DEFAULT_SUBSETS.
                (A mutable list default is deliberately avoided.)
            split: Dataset split (original / diverse_instruction).
            image_resolution: Image resolution preset (high/mid/small/low),
                or None for aspect-preserving downscale only.
            max_samples: Per-subset sample cap; None loads all samples.
        """
        self.data_dir = data_dir
        # Copy the caller's list so external mutation cannot affect us later.
        self.subsets = list(subsets) if subsets is not None else list(self.DEFAULT_SUBSETS)
        self.split = split
        self.image_resolution = image_resolution
        self.max_samples = max_samples

        # Load and merge the requested subsets.
        self.dataset = self._load_dataset()

        # Image placeholder token used by the MMEB data format.
        self.image_token = "<|image_1|>"

        logger.info(f"成功加载MMEB数据集，包含{len(self.dataset)}个样本")

    def _load_dataset(self) -> "datasets.Dataset":
        """Load each requested subset and concatenate them into one dataset.

        Subsets that fail to load are logged and skipped; if none load, a
        ValueError is raised. (Annotation uses the string form because the
        module-level ``Dataset`` name is shadowed by torch's Dataset.)

        Raises:
            ValueError: if no subset could be loaded.
        """
        datasets_list = []

        for subset in self.subsets:
            logger.info(f"加载子集: {subset}")
            try:
                # Bug fix: honour self.data_dir instead of a hard-coded path.
                # The default value is identical, so default behaviour is
                # unchanged.
                subset_data = load_dataset(
                    self.data_dir,
                    subset,
                    split=self.split
                )

                # Cap the per-subset sample count if requested.
                if self.max_samples is not None and len(subset_data) > self.max_samples:
                    subset_data = subset_data.select(range(self.max_samples))

                datasets_list.append(subset_data)
            except Exception as e:
                logger.error(f"加载子集{subset}失败: {str(e)}")

        if not datasets_list:
            raise ValueError("未能加载任何数据集子集")

        return concatenate_datasets(datasets_list)

    def _get_image(self, img_path: str) -> Optional["Image.Image"]:
        """Load an image relative to data_dir and apply the resolution preset.

        Returns None for an empty path or on any load failure (logged).
        """
        if not img_path:
            return None

        # Image paths in the dataset are relative to the dataset root.
        full_img_path = os.path.join(self.data_dir, img_path)

        try:
            image = Image.open(full_img_path).convert("RGB")
            return process_image(image, self.image_resolution)
        except Exception as e:
            logger.warning(f"加载图像{full_img_path}失败: {str(e)}")
            return None

    def __len__(self) -> int:
        """Number of samples in the merged dataset."""
        return len(self.dataset)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Return one sample; samples with empty query/positive inputs are skipped.

        Invalid samples are skipped by scanning forward (with wrap-around)
        instead of the previous unbounded recursion, so an all-invalid
        dataset raises instead of overflowing the stack.

        Raises:
            ValueError: if no valid sample exists in the dataset.
        """
        for offset in range(len(self)):
            cur = (idx + offset) % len(self)
            data = self.dataset[cur]

            # Text fields.
            qry_text = data.get("qry", "")
            pos_text = data.get("pos_text", "")
            neg_text = data.get("neg_text", "")

            # Image path fields.
            qry_image_path = data.get("qry_image_path", "")
            pos_image_path = data.get("pos_image_path", "")
            neg_image_path = data.get("neg_image_path", "")

            # Load images (None when path is empty or loading fails).
            qry_image = self._get_image(qry_image_path)
            pos_image = self._get_image(pos_image_path)
            neg_image = self._get_image(neg_image_path) if neg_image_path else None

            # A usable sample needs text or image on both query and positive sides.
            if (qry_text or qry_image) and (pos_text or pos_image):
                return {
                    "query_text": qry_text,
                    "query_image": qry_image,
                    "query_image_path": qry_image_path,
                    "pos_text": pos_text,
                    "pos_image": pos_image,
                    "pos_image_path": pos_image_path,
                    "neg_text": neg_text,
                    "neg_image": neg_image,
                    "neg_image_path": neg_image_path
                }

            logger.warning(f"样本{cur}包含空输入，跳过")

        raise ValueError("数据集中不存在有效样本")


class LLaVADataset(Dataset):
    """Caption-style dataset over a LLaVA-ReCap JSON file.

    The user message (with its ``<image>`` placeholder rewritten to the MMEB
    image token) becomes the query text, the assistant message becomes the
    positive text, and the first listed image serves as both the query and
    the positive image. The dataset has no negatives.
    """

    def __init__(self, data_dir: str = "data/LLaVA-ReCap-558K-json",
                 json_file: str = "LLaVA-ReCap-558K.json",
                 image_resolution: Optional[str] = "small",
                 max_samples: Optional[int] = None):
        """
        Initialize the LLaVA dataset.

        Args:
            data_dir: Dataset directory containing the JSON file and images.
            json_file: JSON file name inside data_dir.
            image_resolution: Image resolution preset (high/mid/small/low).
            max_samples: Maximum number of samples; None loads everything.
        """
        self.data_dir = data_dir
        self.json_file = json_file
        self.image_resolution = image_resolution
        self.max_samples = max_samples

        # Parse the JSON annotation file.
        self.dataset = self._load_dataset()

        # Image placeholder token (kept consistent with MMEBDataset).
        self.image_token = "<|image_1|>"

        logger.info(f"成功加载LLaVA数据集，包含{len(self.dataset)}个样本")

    def _load_dataset(self) -> List[Dict]:
        """Read the annotation JSON, optionally truncated to max_samples.

        Raises:
            ValueError: if the JSON file does not exist.
            Exception: re-raises any parse/read error after logging it.
        """
        json_path = os.path.join(self.data_dir, self.json_file)

        if not os.path.exists(json_path):
            raise ValueError(f"JSON文件不存在: {json_path}")

        try:
            with open(json_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
        except Exception as e:
            logger.error(f"加载JSON文件失败: {str(e)}")
            raise

        # Cap the total sample count if requested.
        if self.max_samples is not None and len(data) > self.max_samples:
            data = data[:self.max_samples]

        return data

    def _get_image(self, image_path: str) -> Optional["Image.Image"]:
        """Load an image relative to data_dir and apply the resolution preset.

        Returns None for an empty path or on any load failure (logged).
        """
        if not image_path:
            return None

        # Image paths in the JSON are relative to the dataset directory.
        full_img_path = os.path.join(self.data_dir, image_path)

        try:
            image = Image.open(full_img_path).convert("RGB")
            return process_image(image, self.image_resolution)
        except Exception as e:
            logger.warning(f"加载图像{full_img_path}失败: {str(e)}")
            return None

    def __len__(self) -> int:
        """Number of records in the parsed JSON."""
        return len(self.dataset)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Return one sample; samples with empty query/positive inputs are skipped.

        Invalid samples are skipped by scanning forward (with wrap-around)
        instead of the previous unbounded recursion, so an all-invalid
        dataset raises instead of overflowing the stack.

        Raises:
            ValueError: if no valid sample exists in the dataset.
        """
        for offset in range(len(self)):
            cur = (idx + offset) % len(self)
            data = self.dataset[cur]

            # The first listed image (if any) is used for both query and positive.
            image_paths = data.get("images", [])
            qry_image_path = image_paths[0] if image_paths else ""

            # Pull the user and assistant turns out of the message list
            # (the last turn of each role wins, matching the original logic).
            user_message = ""
            assistant_message = ""
            for message in data.get("messages", []):
                role = message.get("role")
                if role == "user":
                    user_message = message.get("content", "")
                elif role == "assistant":
                    assistant_message = message.get("content", "")

            # The query keeps an image placeholder (rewritten to our token);
            # the positive text drops the <image> marker entirely.
            qry_text = user_message.replace("<image>", self.image_token).strip()
            pos_text = assistant_message.replace("<image>", "").strip()

            qry_image = self._get_image(qry_image_path)
            pos_image = qry_image  # positive image == query image for LLaVA

            # A usable sample needs text or image on both query and positive sides.
            if (qry_text or qry_image) and (pos_text or pos_image):
                return {
                    "query_text": qry_text,
                    "query_image": qry_image,
                    "query_image_path": qry_image_path,
                    "pos_text": pos_text,
                    "pos_image": pos_image,
                    "pos_image_path": qry_image_path,  # same path as the query image
                    "neg_text": "",       # LLaVA provides no negatives
                    "neg_image": None,
                    "neg_image_path": ""
                }

            logger.warning(f"样本{cur}包含空输入，跳过")

        raise ValueError("数据集中不存在有效样本")



if __name__ == "__main__":
    # Example usage / smoke test for MMEBDataset.
    try:
        # Create a dataset instance.
        dataset = MMEBDataset(
            data_dir="data/MMEB-train",
            subsets=["DocVQA", "InfographicsVQA", "VisDial"],  # ["ImageNet_1K", "N24News", "VisDial"],
            split="original",
            image_resolution="mid",
            max_samples=100  # load only 100 samples for testing
        )

        # Print the dataset size.
        print(f"数据集总样本数: {len(dataset)}")

        # Inspect the first few samples inline. (Bug fix: the previous
        # inspect_dataset() call referenced a function that is defined
        # nowhere in this file and always raised NameError.)
        for i in range(min(3, len(dataset))):
            s = dataset[i]
            print(f"样本{i}: 查询文本长度={len(s['query_text'])}, "
                  f"查询图像={'有' if s['query_image'] is not None else '无'}")

        # Fetch a single sample.
        sample = dataset[0]
        print(f"样本键值: {sample.keys()}")
        print(f"查询文本: {sample['query_text']}")
        print(f"查询图像: {'已加载' if sample['query_image'] is not None else '未加载'}")
    except Exception as e:
        print(f"运行出错: {str(e)}")