#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
数据加载工具
"""

import logging
import os
import random

import albumentations as A
import cv2
import numpy as np
import pandas as pd
import torch
from albumentations.pytorch import ToTensorV2
from PIL import Image
from torch.utils.data import Dataset, DataLoader, random_split


class MicroExpressionDataset(Dataset):
    """Micro-expression dataset for CASME2 / SMIC / SAMM.

    Each item is a fixed-length clip tensor of shape [C, T, H, W] plus its
    emotion label.  Samples are read either from a preprocessed directory
    (``processed_<dataset>`` with per-split metadata CSVs) or, as a
    fallback, directly from the raw dataset layout on disk.
    """

    def __init__(self, data_root, split='train', config=None, transform=None):
        """Initialize the dataset.

        Args:
            data_root (str): Root directory that contains the dataset.
            split (str): Dataset split: 'train', 'val' or 'test'.
            config (dict): Configuration dictionary; may be None.
            transform: Optional albumentations pipeline applied to each frame.
        """
        self.logger = logging.getLogger()
        self.data_root = data_root
        self.split = split
        self.config = config if config is not None else {}
        self.transform = transform

        # Dataset-level configuration with defaults.
        dataset_cfg = self.config.get('dataset', {})
        self.dataset_name = dataset_cfg.get('name', 'CASME2')
        self.num_frames = dataset_cfg.get('num_frames', 16)
        self.frame_size = dataset_cfg.get('frame_size', [224, 224])  # [H, W]
        self.normalize = dataset_cfg.get('normalize', True)

        # Emotion name (lower case) -> integer class id.
        self.label_map = self._get_label_map()

        # List of dicts: sample_dir / label / label_str / subject_id / sample_id.
        self.samples = self._load_samples()
        self.logger.info(f"加载 {self.dataset_name} {split} 数据集: {len(self.samples)} 个样本")

    def _get_label_map(self):
        """Return the emotion-name -> class-id mapping for the configured dataset."""
        if self.dataset_name == 'CASME2':
            return {
                'happiness': 0,
                'surprise': 1,
                'disgust': 2,
                'repression': 3,
                'fear': 4,
                'sadness': 5,
                'others': 6
            }
        elif self.dataset_name == 'SMIC':
            return {
                'positive': 0,
                'negative': 1,
                'surprise': 2
            }
        elif self.dataset_name == 'SAMM':
            return {
                'happiness': 0,
                'surprise': 1,
                'anger': 2,
                'disgust': 3,
                'fear': 4,
                'contempt': 5,
                'sadness': 6,
                'others': 7
            }
        else:
            # Fallback mapping for unknown dataset names.
            return {
                'happiness': 0,
                'surprise': 1,
                'anger': 2,
                'disgust': 3,
                'fear': 4,
                'sadness': 5,
                'contempt': 6,
                'others': 7
            }

    def _register_sample(self, samples, sample_dir, label, label_str, subject_id, sample_id):
        """Append one sample record to *samples* if its directory exists; warn otherwise."""
        if os.path.exists(sample_dir):
            samples.append({
                'sample_dir': sample_dir,
                'label': label,
                'label_str': label_str,
                'subject_id': subject_id,
                'sample_id': sample_id
            })
        else:
            self.logger.warning(f"样本目录不存在: {sample_dir}")

    def _split_metadata(self, metadata):
        """Deterministically shuffle *metadata* and return this split's slice.

        One seeded permutation is shared by every split, so train (70%),
        val (10%) and test (20%) are disjoint.  Fix vs. the original: the
        train slice used round() (via sample(frac=0.7)) while the val slice
        used int(), which could overlap by one row for some dataset sizes;
        this version uses a single consistent scheme.
        """
        shuffled = metadata.sample(frac=1.0, random_state=42)
        n = len(shuffled)
        train_end = int(n * 0.7)
        if self.split == 'train':
            return shuffled.iloc[:train_end]
        if self.split == 'val':
            return shuffled.iloc[train_end:train_end + int(n * 0.1)]
        if self.split == 'test':
            return shuffled.iloc[int(n * 0.8):]
        # Unknown split: return the metadata untouched (original behavior).
        return metadata

    def _load_samples(self):
        """Collect sample records (paths and labels) for this split.

        Returns:
            list: One dict per sample; empty list on loading failure.
        """
        samples = []

        try:
            # Preferred path: preprocessed data with a per-split metadata CSV.
            processed_dir = os.path.join(self.data_root, f"processed_{self.dataset_name}")
            metadata_path = os.path.join(processed_dir, f"{self.split}_metadata.csv")

            if os.path.exists(metadata_path):
                self.logger.info(f"从预处理元数据加载: {metadata_path}")
                metadata = pd.read_csv(metadata_path)

                for _, row in metadata.iterrows():
                    # str() guards against numeric ids read from the CSV.
                    subject_id = str(row.get('subject_id', 'unknown'))
                    sample_id = str(row.get('sample_id', 'unknown'))
                    label_str = row.get('emotion', 'unknown')
                    # Bug fix: 'emotion' may be NaN (float) in the CSV;
                    # calling .lower() on it raised and aborted the whole load.
                    label_key = label_str.lower() if isinstance(label_str, str) else 'unknown'
                    label = self.label_map.get(label_key, -1)

                    if label == -1:
                        self.logger.warning(f"未知标签: {label_str}，跳过")
                        continue

                    sample_dir = os.path.join(processed_dir, subject_id, sample_id)
                    self._register_sample(samples, sample_dir, label, label_str, subject_id, sample_id)
            else:
                # Fallback: load directly from the raw dataset layout.
                self.logger.warning(f"预处理元数据不存在: {metadata_path}，尝试加载原始数据")

                raw_data_dir = os.path.join(self.data_root, self.dataset_name)
                if not os.path.exists(raw_data_dir):
                    self.logger.error(f"数据集目录不存在: {raw_data_dir}")
                    return []

                if self.dataset_name == 'CASME2':
                    samples = self._load_casme2_samples(raw_data_dir)
                elif self.dataset_name == 'SMIC':
                    samples = self._load_smic_samples(raw_data_dir)
                elif self.dataset_name == 'SAMM':
                    samples = self._load_samm_samples(raw_data_dir)
                else:
                    self.logger.error(f"不支持的数据集: {self.dataset_name}")

        except Exception as e:
            self.logger.error(f"加载样本时出错: {str(e)}")

        return samples

    def _load_excel_samples(self, data_dir, metadata_file, subject_col, subject_fmt,
                            sample_col, emotion_col, tag):
        """Shared loader for the Excel-based raw-dataset layouts.

        The three public ``_load_*_samples`` wrappers were near-identical;
        only the file/column names and subject-directory format differ.

        Args:
            data_dir (str): Raw dataset directory.
            metadata_file (str): Excel file name inside *data_dir*.
            subject_col (str): Column holding the subject identifier.
            subject_fmt (str): Format string building the subject dir name.
            sample_col (str): Column holding the sample identifier.
            emotion_col (str): Column holding the emotion label.
            tag (str): Dataset name used in log messages.

        Returns:
            list: Sample record dicts.
        """
        samples = []

        metadata_path = os.path.join(data_dir, metadata_file)
        if not os.path.exists(metadata_path):
            self.logger.error(f"{tag}元数据文件不存在: {metadata_path}")
            return samples

        try:
            metadata = self._split_metadata(pd.read_excel(metadata_path))

            for _, row in metadata.iterrows():
                subject = subject_fmt.format(row[subject_col])
                sample_id = f"{row[sample_col]}"
                emotion = row[emotion_col]

                # Non-string emotions fall back to 'others'; if the label map
                # has no 'others' (SMIC), class 0 is used — same result as the
                # original per-dataset fallbacks.
                label_str = emotion.lower() if isinstance(emotion, str) else 'others'
                label = self.label_map.get(label_str, self.label_map.get('others', 0))

                sample_dir = os.path.join(data_dir, subject, sample_id)
                self._register_sample(samples, sample_dir, label, label_str, subject, sample_id)

        except Exception as e:
            self.logger.error(f"加载{tag}数据集时出错: {str(e)}")

        return samples

    def _load_casme2_samples(self, data_dir):
        """Load CASME2 samples from the official coding spreadsheet."""
        return self._load_excel_samples(
            data_dir, 'CASME2-coding-20190701.xlsx',
            'Subject', 'sub{}', 'Filename', 'Estimated Emotion', 'CASME2')

    def _load_smic_samples(self, data_dir):
        """Load SMIC samples (column names may need adjusting to the actual release)."""
        return self._load_excel_samples(
            data_dir, 'SMIC_label.xlsx',
            'subject', 's{}', 'sample', 'emotion', 'SMIC')

    def _load_samm_samples(self, data_dir):
        """Load SAMM samples (column names may need adjusting to the actual release)."""
        return self._load_excel_samples(
            data_dir, 'SAMM_label.xlsx',
            'Subject', '{}', 'Filename', 'Emotion', 'SAMM')

    def __len__(self):
        """Return the number of samples."""
        return len(self.samples)

    def __getitem__(self, idx):
        """Return one sample.

        Args:
            idx (int): Sample index.

        Returns:
            dict: {'frames': [C, T, H, W] tensor, 'label', 'label_str',
                   'subject_id', 'sample_id'}.  On any loading failure a zero
                   clip with label 0 is returned so training can continue.
        """
        sample = self.samples[idx]

        try:
            frames = self._load_frames(sample['sample_dir'])

            if self.transform is not None:
                frames = self._apply_transform(frames)

            return {
                'frames': frames,
                'label': sample['label'],
                'label_str': sample['label_str'],
                'subject_id': sample['subject_id'],
                'sample_id': sample['sample_id']
            }

        except Exception as e:
            self.logger.error(f"获取样本 {idx} 时出错: {str(e)}")

            return {
                'frames': torch.zeros(3, self.num_frames, *self.frame_size),
                'label': 0,
                'label_str': 'unknown',
                'subject_id': 'unknown',
                'sample_id': 'unknown'
            }

    def _load_frames(self, sample_dir):
        """Load a clip as a [C, T, H, W] float tensor.

        Prefers a preprocessed ``frames.npz``; otherwise samples
        ``self.num_frames`` frames uniformly from the image files found
        in *sample_dir*.
        """
        # Preprocessed clip — assumed already [C, T, H, W]; pixel scale is
        # not visible here.  NOTE(review): confirm against the preprocessing
        # pipeline whether npz clips are normalized.
        npz_path = os.path.join(sample_dir, 'frames.npz')
        if os.path.exists(npz_path):
            with np.load(npz_path) as data:
                return torch.from_numpy(data['frames']).float()

        frame_files = sorted(f for f in os.listdir(sample_dir)
                             if f.endswith(('.jpg', '.png', '.bmp')))

        if not frame_files:
            self.logger.warning(f"未找到图像文件: {sample_dir}")
            return torch.zeros(3, self.num_frames, *self.frame_size)

        # Temporal sampling: uniform when enough frames, else pad with the last.
        if len(frame_files) >= self.num_frames:
            indices = np.linspace(0, len(frame_files) - 1, self.num_frames, dtype=int)
            selected_files = [frame_files[i] for i in indices]
        else:
            selected_files = frame_files + [frame_files[-1]] * (self.num_frames - len(frame_files))

        frames = []
        for file in selected_files:
            img_path = os.path.join(sample_dir, file)
            img = cv2.imread(img_path)
            if img is None:
                self.logger.warning(f"无法读取图像: {img_path}")
                img = np.zeros((self.frame_size[0], self.frame_size[1], 3), dtype=np.uint8)
            else:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                # cv2.resize takes (width, height).
                img = cv2.resize(img, (self.frame_size[1], self.frame_size[0]))
            frames.append(img)

        frames = np.stack(frames, axis=0)        # [T, H, W, C]
        frames = frames.transpose(3, 0, 1, 2)    # [C, T, H, W]

        # Bug fix: scale to [0, 1] only when no transform is set.  The
        # albumentations pipeline from get_transforms() rescales via
        # A.Normalize, so dividing here as well double-normalized the input.
        if self.normalize and self.transform is None:
            frames = frames / 255.0

        return torch.from_numpy(frames).float()

    def _apply_transform(self, frames):
        """Apply the same augmentation to every frame of a clip.

        Args:
            frames (torch.Tensor): Clip tensor [C, T, H, W].

        Returns:
            torch.Tensor: Augmented clip [C, T, H, W].
        """
        # [C, T, H, W] -> [T, H, W, C] numpy images for albumentations.
        frames = frames.permute(1, 2, 3, 0).numpy()

        if not isinstance(self.transform, A.Compose):
            # Bug fix: numpy arrays have no .permute(); the original fallback
            # raised AttributeError.  Convert back via transpose instead.
            return torch.from_numpy(frames.transpose(3, 0, 1, 2)).float()

        # Re-seed before every frame so all frames receive identical random
        # parameters.  Bug fix: albumentations draws from Python's `random`
        # module (not only np.random), so both must be seeded.
        # NOTE(review): A.ReplayCompose would be the more robust mechanism.
        seed = np.random.randint(2147483647)
        transformed_frames = []

        for i in range(frames.shape[0]):
            random.seed(seed)
            np.random.seed(seed)
            image = frames[i]
            # The pipeline expects uint8 images in [0, 255]; floats that
            # already look normalized (e.g. npz clips in [0, 1]) pass through.
            if image.dtype != np.uint8 and image.max() > 1.5:
                image = np.clip(image, 0, 255).astype(np.uint8)
            transformed_frames.append(self.transform(image=image)['image'])

        # Bug fix: the element type must be checked BEFORE np.stack(), which
        # silently converted torch tensors (from ToTensorV2) into a single
        # ndarray, making the tensor branch unreachable and the subsequent
        # transpose produce the wrong axis order.
        if isinstance(transformed_frames[0], torch.Tensor):
            clip = torch.stack(transformed_frames, dim=0)   # [T, C, H, W]
            return clip.permute(1, 0, 2, 3)                 # [C, T, H, W]

        clip = np.stack(transformed_frames, axis=0)         # [T, H, W, C]
        return torch.from_numpy(clip.transpose(3, 0, 1, 2)).float()


def get_transforms(config, split):
    """Build the albumentations pipeline for one dataset split.

    Training (when augmentation is enabled) adds flip / brightness-contrast /
    shift-scale-rotate on top of the shared resize + ImageNet normalization;
    val and test only resize and normalize.

    Args:
        config (dict): Configuration dictionary.
        split (str): Dataset split name.

    Returns:
        albumentations.Compose: The composed transform.
    """
    dataset_cfg = config.get('dataset', {})
    frame_size = dataset_cfg.get('frame_size', [224, 224])
    use_augmentation = dataset_cfg.get('use_augmentation', True)

    # Shared head: resize every frame to the configured size.
    ops = [A.Resize(height=frame_size[0], width=frame_size[1])]

    # Augmentations apply to the training split only.
    if split == 'train' and use_augmentation:
        ops.append(A.HorizontalFlip(p=0.5))
        ops.append(A.RandomBrightnessContrast(p=0.2))
        ops.append(A.ShiftScaleRotate(shift_limit=0.05, scale_limit=0.05, rotate_limit=10, p=0.3))

    # Shared tail: ImageNet normalization, then to tensor.
    ops.append(A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    ops.append(ToTensorV2())

    return A.Compose(ops)


def create_dataloaders(config):
    """Create the train / val / test dataloaders.

    Args:
        config (dict): Configuration dictionary.

    Returns:
        tuple: (train_loader, val_loader, test_loader)
    """
    logger = logging.getLogger()

    # Pull the few settings the loaders need.
    dataset_cfg = config.get('dataset', {})
    training_cfg = config.get('training', {})
    data_dir = dataset_cfg.get('data_dir', './data')
    batch_size = training_cfg.get('batch_size', 16)
    num_workers = training_cfg.get('num_workers', 4)

    # Build one dataset per split; each gets its split-specific transform.
    datasets = {}
    for split, banner in (
        ('train', "创建训练数据集..."),
        ('val', "创建验证数据集..."),
        ('test', "创建测试数据集..."),
    ):
        logger.info(banner)
        datasets[split] = MicroExpressionDataset(
            data_root=data_dir,
            split=split,
            config=config,
            transform=get_transforms(config, split)
        )

    def build_loader(split):
        # Only the training loader shuffles and drops the last partial batch.
        is_train = split == 'train'
        return DataLoader(
            dataset=datasets[split],
            batch_size=batch_size,
            shuffle=is_train,
            num_workers=num_workers,
            pin_memory=True,
            drop_last=is_train
        )

    train_loader = build_loader('train')
    val_loader = build_loader('val')
    test_loader = build_loader('test')

    logger.info(f"数据加载器创建完成 - 训练: {len(train_loader.dataset)} 样本, 验证: {len(val_loader.dataset)} 样本, 测试: {len(test_loader.dataset)} 样本")

    return train_loader, val_loader, test_loader