#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DEAP数据集加载器

DEAP数据集包含32个被试的EEG和外周生理信号数据，每个被试观看40个情感视频片段。
数据已经过预处理，包括：
- 降采样到128Hz
- 带通滤波(4-45Hz)
- 眼电伪迹去除
- 基线校正

数据格式：
- EEG: 32通道
- 外周信号: 8通道 (EOG, EMG, GSR, 呼吸, 血容积, 温度等)
- 标签: valence(效价), arousal(唤醒), dominance(支配), liking(喜欢)
"""

import logging
import os
import pickle
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class DEAPLoader:
    """Loader for the preprocessed DEAP dataset.

    DEAP contains recordings from 32 subjects, each watching 40 emotional
    video clips. The preprocessed release is downsampled to 128 Hz,
    band-pass filtered (4-45 Hz), EOG-artifact cleaned and baseline
    corrected. Each trial holds 32 EEG channels plus 8 peripheral channels
    and is rated on valence, arousal, dominance and liking.
    """
    
    def __init__(self, data_dir: str = "DEAP_data_preprocessed"):
        """
        Initialize the DEAP data loader.
        
        Args:
            data_dir (str): Directory containing the preprocessed subject
                files (s01.dat ... s32.dat).

        Raises:
            FileNotFoundError: If ``data_dir`` does not exist.
        """
        self.data_dir = data_dir
        self.n_subjects = 32
        self.n_trials = 40
        self.n_channels = 40  # 32 EEG + 8 peripheral channels
        self.n_eeg_channels = 32
        self.n_peripheral_channels = 8
        self.sampling_rate = 128  # Hz
        self.trial_length = 63  # seconds (3 s baseline + 60 s video)
        self.n_timepoints = 8064  # 63 s * 128 Hz
        
        # EEG channel names, in DEAP's storage order (10-20 system).
        self.eeg_channels = [
            'Fp1', 'AF3', 'F3', 'F7', 'FC5', 'FC1', 'C3', 'T7',
            'CP5', 'CP1', 'P3', 'P7', 'PO3', 'O1', 'Oz', 'Pz',
            'Fp2', 'AF4', 'F4', 'F8', 'FC6', 'FC2', 'C4', 'T8',
            'CP6', 'CP2', 'P4', 'P8', 'PO4', 'O2', 'Cz', 'Fz'
        ]
        
        # Peripheral physiological channels (stored after the EEG channels).
        self.peripheral_channels = [
            'hEOG', 'vEOG', 'zEMG', 'tEMG', 'GSR', 'Respiration', 'Plethysmograph', 'Temperature'
        ]
        
        # Label dimensions, in the column order of the labels array.
        self.label_names = ['valence', 'arousal', 'dominance', 'liking']
        
        # Fail fast if the data directory is missing.
        self._validate_data_dir()
    
    def _validate_data_dir(self):
        """Check the data directory exists and warn about missing subject files."""
        if not os.path.exists(self.data_dir):
            raise FileNotFoundError(f"数据目录不存在: {self.data_dir}")
        
        # Missing per-subject files are only a warning: a partial dataset
        # is still usable via load_all_subjects.
        missing_files = []
        for i in range(1, self.n_subjects + 1):
            file_path = os.path.join(self.data_dir, f"s{i:02d}.dat")
            if not os.path.exists(file_path):
                missing_files.append(file_path)
        
        if missing_files:
            logger.warning(f"缺少以下文件: {missing_files}")
    
    def load_subject(self, subject_id: int, encoding: str = 'latin-1') -> Dict[str, np.ndarray]:
        """
        Load the data of a single subject.
        
        Args:
            subject_id (int): Subject ID (1-32).
            encoding (str): Pickle encoding; 'latin-1' is required for the
                Python-2 pickles shipped with DEAP.
            
        Returns:
            Dict[str, np.ndarray]: Dictionary with 'data' and 'labels':
                - data: shape (40, 40, 8064) - (trials, channels, timepoints)
                - labels: shape (40, 4) - (trials, label dimensions)

        Raises:
            ValueError: If ``subject_id`` is out of range.
            FileNotFoundError: If the subject file does not exist.
        """
        if not 1 <= subject_id <= self.n_subjects:
            raise ValueError(f"被试ID必须在1-{self.n_subjects}之间，得到: {subject_id}")
        
        file_path = os.path.join(self.data_dir, f"s{subject_id:02d}.dat")
        
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"被试文件不存在: {file_path}")
        
        try:
            # SECURITY NOTE: pickle.load can execute arbitrary code; only
            # load .dat files obtained from the official DEAP distribution.
            with open(file_path, 'rb') as f:
                data = pickle.load(f, encoding=encoding)
            
            logger.info(f"成功加载被试 {subject_id} 的数据")
            logger.info(f"数据形状: {data['data'].shape}, 标签形状: {data['labels'].shape}")
            
            return data
            
        except Exception as e:
            logger.error(f"加载被试 {subject_id} 数据时出错: {e}")
            raise
    
    def load_all_subjects(self, subject_ids: Optional[List[int]] = None, 
                         encoding: str = 'latin-1') -> Dict[int, Dict[str, np.ndarray]]:
        """
        Load the data of several subjects, skipping those that fail.
        
        Args:
            subject_ids (List[int], optional): Subject IDs to load; all 32
                subjects when None.
            encoding (str): Pickle encoding passed to :meth:`load_subject`.
            
        Returns:
            Dict[int, Dict[str, np.ndarray]]: Per-subject data keyed by
                subject ID; subjects that failed to load are omitted.
        """
        if subject_ids is None:
            subject_ids = list(range(1, self.n_subjects + 1))
        
        all_data = {}
        successful_loads = 0
        
        for subject_id in subject_ids:
            try:
                all_data[subject_id] = self.load_subject(subject_id, encoding)
                successful_loads += 1
            except Exception as e:
                # Best-effort: log and continue with the remaining subjects.
                logger.warning(f"跳过被试 {subject_id}: {e}")
        
        logger.info(f"成功加载 {successful_loads}/{len(subject_ids)} 个被试的数据")
        return all_data
    
    def get_eeg_data(self, data: np.ndarray) -> np.ndarray:
        """
        Extract the EEG channels (first 32).
        
        Args:
            data (np.ndarray): Full data of shape (n_trials, 40, n_timepoints).
            
        Returns:
            np.ndarray: EEG data of shape (n_trials, 32, n_timepoints).
        """
        return data[:, :self.n_eeg_channels, :]
    
    def get_peripheral_data(self, data: np.ndarray) -> np.ndarray:
        """
        Extract the peripheral physiological channels (last 8).
        
        Args:
            data (np.ndarray): Full data of shape (n_trials, 40, n_timepoints).
            
        Returns:
            np.ndarray: Peripheral data of shape (n_trials, 8, n_timepoints).
        """
        return data[:, self.n_eeg_channels:, :]
    
    def get_baseline_data(self, data: np.ndarray) -> np.ndarray:
        """
        Extract the baseline period (first 3 seconds).
        
        Args:
            data (np.ndarray): Full data (time on the last axis).
            
        Returns:
            np.ndarray: Baseline data of shape (..., 384)  # 3 s * 128 Hz
        """
        baseline_samples = 3 * self.sampling_rate  # 384 samples at 128 Hz
        return data[..., :baseline_samples]
    
    def get_stimulus_data(self, data: np.ndarray) -> np.ndarray:
        """
        Extract the stimulus period (seconds 3-63, i.e. video playback).
        
        Args:
            data (np.ndarray): Full data (time on the last axis).
            
        Returns:
            np.ndarray: Stimulus data of shape (..., 7680)  # 60 s * 128 Hz
        """
        baseline_samples = 3 * self.sampling_rate
        return data[..., baseline_samples:]
    
    def get_data_statistics(self, data: Dict[str, np.ndarray]) -> Dict[str, Any]:
        """
        Compute summary statistics for one subject's data.
        
        Args:
            data (Dict): Dictionary with 'data' and 'labels' arrays.
            
        Returns:
            Dict[str, Any]: Shapes, value ranges and per-label mean/std.
        """
        eeg_data = self.get_eeg_data(data['data'])
        peripheral_data = self.get_peripheral_data(data['data'])
        labels = data['labels']
        
        stats = {
            'data_shape': data['data'].shape,
            'labels_shape': labels.shape,
            'eeg_data_range': (eeg_data.min(), eeg_data.max()),
            'eeg_data_mean': eeg_data.mean(),
            'eeg_data_std': eeg_data.std(),
            'peripheral_data_range': (peripheral_data.min(), peripheral_data.max()),
            'labels_range': {
                name: (labels[:, i].min(), labels[:, i].max()) 
                for i, name in enumerate(self.label_names)
            },
            'labels_mean': {
                name: labels[:, i].mean() 
                for i, name in enumerate(self.label_names)
            },
            'labels_std': {
                name: labels[:, i].std() 
                for i, name in enumerate(self.label_names)
            }
        }
        
        return stats
    
    def print_dataset_info(self):
        """Print the basic dataset description to stdout."""
        print("=" * 60)
        print("DEAP数据集信息")
        print("=" * 60)
        print(f"被试数量: {self.n_subjects}")
        print(f"每个被试的试次数: {self.n_trials}")
        print(f"通道总数: {self.n_channels}")
        print(f"  - EEG通道: {self.n_eeg_channels}")
        print(f"  - 外周信号通道: {self.n_peripheral_channels}")
        print(f"采样率: {self.sampling_rate} Hz")
        print(f"试次长度: {self.trial_length} 秒")
        print(f"时间点数: {self.n_timepoints}")
        print(f"标签维度: {len(self.label_names)} ({', '.join(self.label_names)})")
        print()
        print("EEG通道名称:")
        for i, ch in enumerate(self.eeg_channels):
            print(f"  {i+1:2d}: {ch}")
        print()
        print("外周信号通道:")
        for i, ch in enumerate(self.peripheral_channels):
            print(f"  {i+33:2d}: {ch}")
        print("=" * 60)
    
    def create_dataset_splits(self, data: Dict[str, np.ndarray], 
                            train_ratio: float = 0.7, 
                            val_ratio: float = 0.15,
                            test_ratio: float = 0.15,
                            random_state: int = 42) -> Dict[str, Dict[str, np.ndarray]]:
        """
        Create train/validation/test splits over trials.
        
        Args:
            data (Dict): Dictionary with 'data' and 'labels' arrays.
            train_ratio (float): Fraction of trials for training.
            val_ratio (float): Fraction of trials for validation.
            test_ratio (float): Fraction of trials for testing.
            random_state (int): Seed for the shuffling permutation.
            
        Returns:
            Dict: Mapping 'train'/'val'/'test' -> {'data', 'labels'}.

        Raises:
            ValueError: If the three ratios do not sum to 1.
        """
        if abs(train_ratio + val_ratio + test_ratio - 1.0) > 1e-6:
            raise ValueError("训练/验证/测试比例之和必须等于1")
        
        # Use a local RandomState instead of reseeding NumPy's global RNG:
        # RandomState(seed).permutation(n) produces the exact same
        # permutation as np.random.seed(seed); np.random.permutation(n),
        # but without the global side effect on callers.
        rng = np.random.RandomState(random_state)
        n_trials = data['data'].shape[0]
        indices = rng.permutation(n_trials)
        
        train_end = int(n_trials * train_ratio)
        val_end = train_end + int(n_trials * val_ratio)
        
        train_idx = indices[:train_end]
        val_idx = indices[train_end:val_end]
        test_idx = indices[val_end:]
        
        splits = {
            'train': {
                'data': data['data'][train_idx],
                'labels': data['labels'][train_idx]
            },
            'val': {
                'data': data['data'][val_idx],
                'labels': data['labels'][val_idx]
            },
            'test': {
                'data': data['data'][test_idx],
                'labels': data['labels'][test_idx]
            }
        }
        
        logger.info("数据分割完成:")
        logger.info(f"  训练集: {len(train_idx)} 试次")
        logger.info(f"  验证集: {len(val_idx)} 试次")
        logger.info(f"  测试集: {len(test_idx)} 试次")
        
        return splits


def main():
    """Example usage: load one subject, report statistics, and split the trials."""
    # Build the loader and describe the dataset.
    deap = DEAPLoader()
    deap.print_dataset_info()
    
    # Load the first subject's recordings.
    print("\n加载被试1的数据...")
    subject1 = deap.load_subject(1)
    
    # Summarize the loaded arrays.
    stats = deap.get_data_statistics(subject1)
    print("\n数据统计信息:")
    for key in stats:
        value = stats[key]
        print(f"{key}: {value}")
    
    # Separate EEG channels from peripheral physiological channels.
    eeg_data = deap.get_eeg_data(subject1['data'])
    print(f"\nEEG数据形状: {eeg_data.shape}")
    
    peripheral_data = deap.get_peripheral_data(subject1['data'])
    print(f"外周信号数据形状: {peripheral_data.shape}")
    
    # Partition the subject's trials into train/val/test subsets.
    partitions = deap.create_dataset_splits(subject1)
    print(f"\n数据集分割:")
    for split_name, split_data in partitions.items():
        print(f"{split_name}: 数据{split_data['data'].shape}, 标签{split_data['labels'].shape}")


class EEGMatrixPreprocessor:
    """
    EEG preprocessor that converts channel data into a 9x9 matrix format.
    
    Pipeline:
    1. Split EEG data into 1-second time windows.
    2. Baseline correction (subtract the mean of the 3-second baseline).
    3. Min-max normalization per trial and channel.
    4. Build 9x9 EEG matrices following the 10-20 electrode layout.
    """
    
    def __init__(self, sampling_rate: int = 128):
        """
        Initialize the EEG matrix preprocessor.
        
        Args:
            sampling_rate (int): Sampling rate in Hz (default 128).
        """
        self.sampling_rate = sampling_rate
        self.window_size = sampling_rate  # samples per 1-second window
        self.baseline_duration = 3  # baseline length in seconds
        self.baseline_samples = self.baseline_duration * sampling_rate  # e.g. 384 at 128 Hz
        
        # DEAP's 32 EEG channels, in dataset order (10-20 system); the
        # index in this list matches the channel axis of the input arrays.
        self.eeg_channels = [
            'Fp1', 'AF3', 'F3', 'F7', 'FC5', 'FC1', 'C3', 'T7',
            'CP5', 'CP1', 'P3', 'P7', 'PO3', 'O1', 'Oz', 'Pz',
            'Fp2', 'AF4', 'F4', 'F8', 'FC6', 'FC2', 'C4', 'T8',
            'CP6', 'CP2', 'P4', 'P8', 'PO4', 'O2', 'Cz', 'Fz'
        ]
        
        # Precompute channel -> 9x9 cell mappings.
        self._create_channel_mapping()
    
    def _create_channel_mapping(self):
        """Build the mapping from the 32 DEAP EEG channels to 9x9 matrix cells."""
        # 9x9 scalp layout derived from the 10-20 system, restricted to the
        # 32 channels actually present in DEAP. None marks an empty cell.
        self.matrix_layout = [
            [None, None, None, 'Fp1', None, 'Fp2', None, None, None],
            [None, None, 'AF3', None, 'Fz',  None, 'AF4', None, None],
            ['F7',  None, 'F3',  None, None, None,  'F4',  None, 'F8'],
            [None, 'FC5', None, 'FC1', None, 'FC2', None, 'FC6', None],
            ['T7',  None, 'C3',  None, 'Cz',  None, 'C4',  None, 'T8'],
            [None, 'CP5', None, 'CP1', None, 'CP2', None, 'CP6', None],
            ['P7',  None, 'P3',  None, 'Pz',  None, 'P4',  None, 'P8'],
            [None, None, 'PO3', None, None, None, 'PO4', None, None],
            [None, None, None,  'O1',  'Oz',  'O2', None, None, None]
        ]
        
        # Channel name -> (row, col).
        self.channel_to_position = {}
        for i, row in enumerate(self.matrix_layout):
            for j, channel in enumerate(row):
                if channel is not None:
                    self.channel_to_position[channel] = (i, j)
        
        # Verify that every DEAP channel has a position in the layout.
        missing_channels = []
        for channel_name in self.eeg_channels:
            if channel_name not in self.channel_to_position:
                missing_channels.append(channel_name)
        
        if missing_channels:
            logger.warning(f"以下通道在矩阵布局中未找到对应位置: {missing_channels}")
        
        # Channel index -> (row, col); channels absent from the layout are
        # assigned a free cell so no data is silently dropped.
        self.channel_idx_to_position = {}
        for idx, channel_name in enumerate(self.eeg_channels):
            if channel_name in self.channel_to_position:
                self.channel_idx_to_position[idx] = self.channel_to_position[channel_name]
            else:
                self.channel_idx_to_position[idx] = self._find_nearest_position(channel_name, idx)
    
    def _find_nearest_position(self, channel_name: str, channel_idx: int) -> Tuple[int, int]:
        """Assign an empty matrix cell to a channel without a standard position."""
        # Collect every empty cell of the layout.
        empty_positions = []
        for i, row in enumerate(self.matrix_layout):
            for j, channel in enumerate(row):
                if channel is None:
                    empty_positions.append((i, j))
        
        if not empty_positions:
            # No free cell left: fall back to the center (will overwrite).
            logger.warning(f"矩阵中没有空位置，通道 {channel_name} 将被放置在中心位置")
            return (4, 4)
        
        # Simple round-robin assignment over the free cells.
        position_idx = channel_idx % len(empty_positions)
        assigned_position = empty_positions[position_idx]
        
        logger.info(f"通道 {channel_name} 被分配到位置 {assigned_position}")
        return assigned_position
    
    def split_into_time_windows(self, eeg_data: np.ndarray) -> np.ndarray:
        """
        Split EEG data into non-overlapping 1-second windows.
        
        Args:
            eeg_data (np.ndarray): EEG data of shape
                (n_trials, n_channels, n_timepoints).
            
        Returns:
            np.ndarray: Windowed data of shape
                (n_trials, n_windows, n_channels, window_size); trailing
                samples that do not fill a whole window are discarded.
        """
        n_trials, n_channels, n_timepoints = eeg_data.shape
        n_windows = n_timepoints // self.window_size
        
        # Drop the tail that does not fill a complete window.
        truncated_length = n_windows * self.window_size
        truncated_data = eeg_data[:, :, :truncated_length]
        
        # Reshape into windows, then move the window axis before channels:
        # (n_trials, n_windows, n_channels, window_size).
        windowed_data = truncated_data.reshape(n_trials, n_channels, n_windows, self.window_size)
        windowed_data = windowed_data.transpose(0, 2, 1, 3)
        
        logger.info(f"数据分割完成: {eeg_data.shape} -> {windowed_data.shape}")
        return windowed_data
    
    def baseline_correction(self, eeg_data: np.ndarray, baseline_data: np.ndarray) -> np.ndarray:
        """
        Baseline correction: subtract each channel's mean baseline value.
        
        Args:
            eeg_data (np.ndarray): Windowed EEG data of shape
                (n_trials, n_windows, n_channels, window_size).
            baseline_data (np.ndarray): Baseline data of shape
                (n_trials, n_channels, baseline_samples).
            
        Returns:
            np.ndarray: Baseline-corrected data, same shape as ``eeg_data``.
        """
        # Averaging three equal baseline segments element-wise and then
        # averaging over time (the original formulation) is mathematically
        # identical to a single mean over the truncated baseline period,
        # so compute that mean directly.
        segment_length = self.baseline_samples // 3
        usable = baseline_data[:, :, :3 * segment_length]
        baseline_avg = usable.mean(axis=2)  # (n_trials, n_channels)
        
        # Broadcast the per-channel mean over all windows and timepoints:
        # (n_trials, 1, n_channels, 1) against
        # (n_trials, n_windows, n_channels, window_size).
        corrected_data = eeg_data - baseline_avg[:, None, :, None]
        
        logger.info("基线校正完成")
        return corrected_data
    
    def normalize_data(self, data: np.ndarray) -> np.ndarray:
        """
        Min-max normalize each (trial, channel) pair to [0, 1].
        
        Args:
            data (np.ndarray): Input of shape
                (n_trials, n_windows, n_channels, window_size).
            
        Returns:
            np.ndarray: Normalized data; channels whose value span is
                <= 1e-8 are set to 0 to avoid division by zero.
        """
        # Min/max are taken jointly over windows and time (axes 1 and 3),
        # i.e. over all samples of one channel within one trial — this
        # replaces the per-trial/per-channel Python loops with one
        # vectorized pass over the whole array.
        min_val = data.min(axis=(1, 3), keepdims=True)
        max_val = data.max(axis=(1, 3), keepdims=True)
        span = max_val - min_val
        
        # Same strict guard as the loop version: span > 1e-8 normalizes,
        # anything else maps to 0. The inner np.where avoids dividing by 0.
        safe_span = np.where(span > 1e-8, span, 1.0)
        normalized_data = np.where(span > 1e-8, (data - min_val) / safe_span, 0.0)
        
        logger.info("数据归一化完成")
        return normalized_data
    
    def create_eeg_matrices(self, windowed_data: np.ndarray) -> np.ndarray:
        """
        Map windowed channel data onto 9x9 matrices (10-20 layout).
        
        Args:
            windowed_data (np.ndarray): Windowed data of shape
                (n_trials, n_windows, n_channels, window_size).
            
        Returns:
            np.ndarray: EEG matrices of shape
                (n_trials, n_windows, window_size, 9, 9); cells with no
                mapped channel stay 0.
        """
        n_trials, n_windows, n_channels, window_size = windowed_data.shape
        eeg_matrices = np.zeros((n_trials, n_windows, window_size, 9, 9))
        
        # Vectorized scatter: copy each channel's full time series into its
        # matrix cell in one assignment, instead of the original
        # trial x window x timepoint x channel Python loop.
        for channel_idx, (row, col) in self.channel_idx_to_position.items():
            if channel_idx < n_channels:
                eeg_matrices[:, :, :, row, col] = windowed_data[:, :, channel_idx, :]
        
        logger.info(f"EEG矩阵构建完成: {windowed_data.shape} -> {eeg_matrices.shape}")
        return eeg_matrices
    
    def process_eeg_data(self, eeg_data: np.ndarray, baseline_data: np.ndarray) -> np.ndarray:
        """
        Full EEG preprocessing pipeline.
        
        Args:
            eeg_data (np.ndarray): EEG data of shape
                (n_trials, n_channels, n_timepoints).
            baseline_data (np.ndarray): Baseline data of shape
                (n_trials, n_channels, baseline_samples).
            
        Returns:
            np.ndarray: Preprocessed EEG matrices of shape
                (n_trials, n_windows, window_size, 9, 9).
        """
        logger.info("开始EEG数据预处理...")
        
        # 1. Split into 1-second windows.
        windowed_data = self.split_into_time_windows(eeg_data)
        
        # 2. Baseline correction.
        corrected_data = self.baseline_correction(windowed_data, baseline_data)
        
        # 3. Min-max normalization.
        normalized_data = self.normalize_data(corrected_data)
        
        # 4. Build the 9x9 EEG matrices.
        eeg_matrices = self.create_eeg_matrices(normalized_data)
        
        logger.info(f"EEG数据预处理完成: 输入{eeg_data.shape} -> 输出{eeg_matrices.shape}")
        return eeg_matrices
    
    def print_matrix_layout(self):
        """Print the 9x9 channel layout and the channel-to-cell mapping."""
        print("=" * 60)
        print("9x9 EEG矩阵通道布局（基于10-20导联系统）")
        print("=" * 60)
        for i, row in enumerate(self.matrix_layout):
            row_str = ""
            for j, channel in enumerate(row):
                if channel is None:
                    row_str += "  0  "
                else:
                    row_str += f"{channel:>4} "
            print(f"行 {i}: {row_str}")
        print("=" * 60)
        
        # Show the DEAP channel -> matrix position mapping.
        print("\nDEAP通道到矩阵位置的映射:")
        for idx, channel_name in enumerate(self.eeg_channels):
            if idx in self.channel_idx_to_position:
                row, col = self.channel_idx_to_position[idx]
                print(f"通道 {idx+1:2d} ({channel_name:>4}): 位置 ({row}, {col})")
        print("=" * 60)


# Run the demo workflow only when executed as a script, not on import.
if __name__ == "__main__":
    main()
