import os
import json
import numpy as np
import yaml
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from transformers import CLIPVisionModel, CLIPImageProcessor, LlamaForCausalLM, LlamaTokenizer
from llava.model import LlavaLlamaForCausalLM
from llava.conversation import conv_templates

# Absolute path of the directory containing this file.
current_dir = os.path.abspath(os.path.dirname(__file__))
# Project root: one level above this file's directory.
root_dir = os.path.abspath(os.path.join(current_dir, os.pardir))

class BaseEmotionDataset(Dataset):
    """Base dataset for image emotion classification.

    Subclasses must implement ``load_annotations`` and may override
    ``get_image_path`` / ``get_prompt`` to adapt to other dataset layouts.
    Each item is a dict with ``input_ids``, ``pixel_values`` and ``labels``.
    """

    def __init__(self, data_root, data_file, image_processor, tokenizer, emotion_map=None):
        """
        :param data_root: root directory containing the images
        :param data_file: path to the annotation file (format is subclass-specific)
        :param image_processor: CLIP image processor producing pixel values
        :param tokenizer: tokenizer used to encode the prompt
        :param emotion_map: optional {emotion_name: label_id} mapping;
            defaults to the 8-class EmoSet mapping
        """
        self.data_root = data_root
        self.image_processor = image_processor
        self.tokenizer = tokenizer

        # Fall back to the default EmoSet 8-emotion mapping when none is given.
        self.emotion_map = emotion_map or {
            'amusement': 0, 'anger': 1, 'awe': 2, 'contentment': 3,
            'disgust': 4, 'excitement': 5, 'fear': 6, 'sadness': 7
        }

        # Conversation template used to build prompts; copied per item in
        # __getitem__ so this shared instance is never mutated.
        self.conv = conv_templates["llava_v1"].copy()

        # Load the project configuration (image size, etc.).
        # encoding specified explicitly so the file parses regardless of locale.
        config_path = os.path.join(root_dir, 'configs', 'config.yaml')
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        # Load dataset annotations (implemented by subclasses).
        self.annotations = self.load_annotations(data_file)

    def load_annotations(self, data_file):
        """
        Load the annotation file.

        Subclasses must override this to support their dataset format.
        Expected return format: [(emotion, image_path, optional_data), ...]
        """
        raise NotImplementedError("子类必须实现load_annotations方法")

    def get_image_path(self, relative_path):
        """
        Return the full path of an image.

        Subclasses may override this to support other image path layouts.
        """
        return os.path.join(self.data_root, relative_path)

    def get_prompt(self):
        """
        Return the question text shown to the model.

        Subclasses may override this to use a different prompt format.
        """
        emotions = list(self.emotion_map.keys())
        return f"What is the emotion expressed in this image? Please choose from: {', '.join(emotions)}."

    def __len__(self):
        """Number of annotated samples."""
        return len(self.annotations)

    def __getitem__(self, idx):
        """Return {'input_ids', 'pixel_values', 'labels'} for sample *idx*."""
        item = self.annotations[idx]
        emotion, image_path = item[0], item[1]

        # Resolve the full on-disk image path.
        image_path = self.get_image_path(image_path)

        try:
            image = Image.open(image_path).convert('RGB')
        except Exception as e:
            # Best-effort fallback: keep iteration alive on a corrupt/missing
            # image by substituting a black placeholder of the configured size.
            print(f"Error loading image {image_path}: {e}")
            size = self.config['data']['image_size']
            image = Image.new('RGB', (size, size), color='black')

        # Preprocess the image into CLIP pixel values.
        vision_x = self.image_processor(
            image,
            return_tensors="pt",
            size={"height": self.config['data']['image_size'], "width": self.config['data']['image_size']}
        )["pixel_values"][0]

        # Build the prompt on a fresh copy of the conversation template so the
        # shared self.conv is never mutated (previously self.conv.messages was
        # reset in place on every call, which is unsafe under concurrent or
        # re-entrant access).
        conv = self.conv.copy()
        conv.messages = []  # start from an empty dialogue, as the original did
        conv.append_message(conv.roles[0], self.get_prompt())
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        # Tokenize the prompt.
        input_ids = self.tokenizer(
            prompt,
            return_tensors="pt",
            padding="longest",
            max_length=512,
            truncation=True,
        ).input_ids[0]

        # Numeric class label for the emotion string.
        label = self.emotion_map[emotion]

        return {
            'input_ids': input_ids,
            'pixel_values': vision_x,
            'labels': torch.tensor(label, dtype=torch.long)
        }

class EmoSetDataset(BaseEmotionDataset):
    """Concrete dataset implementation for the EmoSet benchmark."""

    def load_annotations(self, data_file):
        """Read the EmoSet annotation list from a JSON file."""
        with open(data_file, 'r', encoding='utf-8') as handle:
            return json.load(handle)

    def get_image_path(self, image_path):
        """Resolve the full image path, correcting the 'image/' prefix to 'images/'."""
        corrected = image_path.replace('image/', 'images/')
        return super().get_image_path(corrected)

class CustomEmotionDataset(BaseEmotionDataset):
    """Example implementation for a custom CSV-style dataset.

    Annotation file format: one ``image_path,emotion`` pair per line.
    """

    def load_annotations(self, data_file):
        """
        Parse a CSV-like annotation file into [(emotion, image_path), ...].

        Fixes over a naive split: blank lines are skipped (a trailing newline
        previously raised ValueError on unpack), and only the LAST comma is
        used as the separator so image paths containing commas still parse.
        """
        annotations = []
        with open(data_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank lines / trailing newline
                image_path, emotion = line.rsplit(',', 1)
                annotations.append((emotion.strip(), image_path.strip()))
        return annotations

def _build_loader(dataset, data_cfg, shuffle):
    """Create a DataLoader from the shared settings in config['data']."""
    num_workers = data_cfg['num_workers']
    kwargs = {
        'batch_size': data_cfg['batch_size'],
        'shuffle': shuffle,
        'num_workers': num_workers,
        'pin_memory': data_cfg.get('pin_memory', True),
    }
    # prefetch_factor / persistent_workers are only valid with worker
    # processes; passing them when num_workers == 0 raises ValueError.
    if num_workers > 0:
        kwargs['prefetch_factor'] = data_cfg.get('prefetch_factor', 2)
        kwargs['persistent_workers'] = data_cfg.get('persistent_workers', True)
    return DataLoader(dataset, **kwargs)


def prepare_dataloaders(config, dataset_class=EmoSetDataset, emotion_map=None):
    """
    Prepare train/val/test data loaders.

    :param config: configuration dict
    :param dataset_class: dataset class (defaults to EmoSetDataset)
    :param emotion_map: optional custom emotion-label mapping
    :return: (train_loader, val_loader, test_loader)
    """
    # Image processor shared by all three splits.
    image_processor = CLIPImageProcessor.from_pretrained(
        config['model'].get('vision_tower', "openai/clip-vit-large-patch14-336"),
        torch_dtype=torch.float16,
        size={"height": config['data']['image_size'], "width": config['data']['image_size']}
    )

    tokenizer = LlamaTokenizer.from_pretrained(
        config['model']['pretrained_path']
    )

    # Build the three splits identically, differing only in the annotation file.
    def _make_dataset(split_key):
        return dataset_class(
            config['data']['emoset_path'],
            config['data'][split_key],
            image_processor,
            tokenizer,
            emotion_map
        )

    train_dataset = _make_dataset('train_data_path')
    val_dataset = _make_dataset('val_data_path')
    test_dataset = _make_dataset('test_data_path')

    print(f"\n数据集信息:")
    print(f"训练集样本数: {len(train_dataset)}")
    print(f"验证集样本数: {len(val_dataset)}")
    print(f"测试集样本数: {len(test_dataset)}")

    train_loader = _build_loader(train_dataset, config['data'], shuffle=True)
    val_loader = _build_loader(val_dataset, config['data'], shuffle=False)
    test_loader = _build_loader(test_dataset, config['data'], shuffle=False)

    return train_loader, val_loader, test_loader

def get_emotion_name(label, emotion_map=None):
    """Return the emotion name for a numeric label ID, or 'unknown' if absent."""
    mapping = emotion_map if emotion_map is not None else {
        'amusement': 0, 'anger': 1, 'awe': 2, 'contentment': 3,
        'disgust': 4, 'excitement': 5, 'fear': 6, 'sadness': 7
    }
    # Invert {name: id} into {id: name}, then look the label up.
    id_to_name = dict((idx, name) for name, idx in mapping.items())
    return id_to_name.get(label, "unknown")

if __name__ == '__main__':
    # Load the project configuration from the repo's configs directory.
    config_path = os.path.join(root_dir, 'configs', 'config.yaml')
    print(f"正在加载配置文件: {config_path}")
    
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    
    # Build train/val/test data loaders from the config.
    train_loader, val_loader, test_loader = prepare_dataloaders(config)
    print(f"\n数据加载器信息:")
    print(f"训练集批次数: {len(train_loader)}")
    print(f"验证集批次数: {len(val_loader)}")
    print(f"测试集批次数: {len(test_loader)}")
    
    # Smoke test: fetch the first batch and report tensor shapes.
    print("\n正在加载第一个批次...")
    batch = next(iter(train_loader))
    print("\n数据批次信息:")
    print(f"输入ID形状: {batch['input_ids'].shape}")
    print(f"图像特征形状: {batch['pixel_values'].shape}")
    print(f"标签形状: {batch['labels'].shape}")
    
    # Report the per-emotion label distribution within this batch.
    print("\n标签分布:")
    labels = batch['labels'].numpy()
    unique_labels, counts = np.unique(labels, return_counts=True)
    for label, count in zip(unique_labels, counts):
        emotion = get_emotion_name(label)
        print(f"标签 {label} ({emotion}): {count}个样本") 