"""
102受试者SSVEP数据集加载器
数据维度: [8通道, 710时间点, 2电极类型, 10区块, 12目标]
采样率: 250Hz, 时长: 2.84秒 (0.5s基线 + 2s刺激 + 0.14s视觉潜伏期 + 0.2s结束)
"""

import numpy as np
import scipy.io
import os
from typing import Tuple, List, Optional, Dict
import warnings

class SSVEP102DatasetLoader:
    """Loader for the 102-subject, 12-target SSVEP dataset.

    Each subject file ``SXXX.mat`` holds a ``data`` matrix of shape
    [8 channels, 710 time points, 2 electrode types, 10 blocks, 12 targets]
    sampled at 250 Hz. One epoch lasts 2.84 s:
    0.5 s baseline + 2 s stimulation + 0.14 s visual latency + 0.2 s tail.
    """

    def __init__(self, dataset_path: Optional[str] = None):
        """Initialize the loader and print a configuration summary.

        Parameters:
        dataset_path: dataset directory; None triggers auto-detection.
        """
        if dataset_path is None:
            # Probe a list of conventional locations for the dataset.
            self.dataset_path = self._find_dataset_path()
        else:
            self.dataset_path = dataset_path
        self.fs = 250  # sampling rate (Hz)
        self.n_channels = 8
        self.n_timepoints = 710  # 2.84 s at 250 Hz
        self.n_electrode_types = 2  # 1: wet electrodes, 2: dry electrodes
        self.n_blocks = 10
        self.n_targets = 12
        self.n_subjects = 102

        # Epoch timing in seconds; the four segments sum to 2.84 s.
        self.baseline_duration = 0.5
        self.stimulus_duration = 2.0
        self.visual_latency = 0.14
        self.post_stimulus = 0.2

        # Segment lengths in samples, derived from the timing above.
        self.baseline_samples = int(self.baseline_duration * self.fs)  # 125 samples
        self.stimulus_samples = int(self.stimulus_duration * self.fs)  # 500 samples

        # Stimulation frequencies (Hz) from stimulation_information.pdf;
        # character order 1,2,3,4,5,6,7,8,9,0,*,# maps to target indices 0-11.
        self.ssvep_frequencies = [9.25, 11.25, 13.25, 9.75, 11.75, 13.75,
                                  10.25, 12.25, 14.25, 10.75, 12.75, 14.75]
        self.character_labels = ['1', '2', '3', '4', '5', '6',
                                 '7', '8', '9', '0', '*', '#']

        # Placeholder channel names; the real electrode montage is not
        # documented here -- TODO confirm against the dataset description.
        self.channel_names = [f"Ch{i}" for i in range(1, self.n_channels + 1)]

        print(f"数据集加载器初始化:")
        print(f"  数据集路径: {self.dataset_path}")
        print(f"  采样率: {self.fs} Hz")
        print(f"  数据维度: [{self.n_channels}, {self.n_timepoints}, {self.n_electrode_types}, {self.n_blocks}, {self.n_targets}]")
        print(f"  时间窗口: 基线{self.baseline_duration}s + 刺激{self.stimulus_duration}s + 其他{self.visual_latency + self.post_stimulus}s")

    def _find_dataset_path(self) -> str:
        """Search conventional locations for the dataset directory.

        A candidate is accepted only if it exists AND contains S001.mat,
        which confirms it really is this dataset. Falls back to a default
        path (with a warning) when nothing matches.

        Returns:
        The first matching path, or the default path as a last resort.
        """
        possible_paths = [
            "../dataset102_12",  # relative to the script directory
            "../../SSVEP/dataset102_12",  # from the project root
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "dataset102_12"),  # computed absolute path
            "SSVEP/dataset102_12",  # from the project root
            "dataset102_12",  # current directory
        ]

        for path in possible_paths:
            # Require S001.mat to validate the candidate; compute the absolute
            # path only for a confirmed hit (the original did it for every candidate).
            if os.path.exists(path) and os.path.exists(os.path.join(path, "S001.mat")):
                print(f"自动检测到数据集路径: {path} -> {os.path.abspath(path)}")
                return path

        # Nothing found: warn and return the default location.
        default_path = "../dataset102_12"
        print(f"警告: 未找到数据集，使用默认路径: {default_path}")
        print("请确保数据集位于正确位置或手动指定路径")
        return default_path

    def load_subject_data(self, subject_id: int) -> Optional[np.ndarray]:
        """Load one subject's .mat file.

        Parameters:
        subject_id: subject number, 1..n_subjects.

        Returns:
        Array of shape [8, 710, 2, 10, 12], or None if the file is missing,
        unreadable, lacks a 'data' field, or has an unexpected shape.

        Raises:
        ValueError: if subject_id is out of range.
        """
        # Range check against the instance constant (was hard-coded 102).
        if not 1 <= subject_id <= self.n_subjects:
            raise ValueError(f"受试者ID必须在1-{self.n_subjects}之间，得到: {subject_id}")

        filename = f"S{subject_id:03d}.mat"
        filepath = os.path.join(self.dataset_path, filename)

        if not os.path.exists(filepath):
            print(f"警告: 文件不存在 {filepath}")
            return None

        try:
            mat_data = scipy.io.loadmat(filepath)

            if 'data' not in mat_data:
                # Bug fix: the original message printed a literal "(unknown)"
                # instead of the offending file name.
                print(f"警告: 文件 {filename} 中没有'data'字段")
                return None

            data = mat_data['data']

            # Validate the expected 5-D layout before handing it out.
            expected_shape = (self.n_channels, self.n_timepoints, self.n_electrode_types,
                              self.n_blocks, self.n_targets)
            if data.shape != expected_shape:
                print(f"警告: 数据维度不匹配，期望 {expected_shape}，实际 {data.shape}")
                return None

            print(f"成功加载受试者 {subject_id} 数据，形状: {data.shape}")
            return data

        except Exception as e:
            # Best-effort loader: report the failure and let callers skip.
            print(f"加载受试者 {subject_id} 数据失败: {e}")
            return None

    def extract_trials(self, subject_data: np.ndarray, electrode_type: int = 1,
                      time_window: Optional[Tuple[float, float]] = None) -> Tuple[np.ndarray, np.ndarray]:
        """Extract per-trial epochs from one subject's 5-D matrix.

        Parameters:
        subject_data: subject data of shape [8, 710, 2, 10, 12].
        electrode_type: 1 for wet electrodes, 2 for dry electrodes.
        time_window: (start_time, end_time) in seconds, or None for the
            full epoch.

        Returns:
        trials_data: array [n_trials, n_channels, n_timepoints], trial index
            running block-major (block_idx * n_targets + target_idx).
        labels: array [n_trials] with target indices 0-11.

        Raises:
        ValueError: if electrode_type is not 1 or 2.
        """
        if electrode_type not in [1, 2]:
            raise ValueError("电极类型必须是1(湿电极)或2(干电极)")

        electrode_idx = electrode_type - 1  # convert to 0-based index

        # Select the electrode type -> [channels, time, blocks, targets].
        electrode_data = subject_data[:, :, electrode_idx, :, :]

        # Optionally crop the epoch to a sub-window (clamped to valid samples).
        if time_window is not None:
            start_time, end_time = time_window
            start_sample = max(0, int(start_time * self.fs))
            end_sample = min(self.n_timepoints, int(end_time * self.fs))
            electrode_data = electrode_data[:, start_sample:end_sample, :, :]
            print(f"应用时间窗口 [{start_time:.1f}s, {end_time:.1f}s]: 样本点 [{start_sample}, {end_sample})")

        # Vectorized reordering (replaces the original block/target double loop):
        # move blocks and targets to the front, then flatten them so that
        # trial index = block_idx * n_targets + target_idx -- identical order
        # to the original loop nest.
        n_channels, n_timepoints = electrode_data.shape[0], electrode_data.shape[1]
        trials_data = np.transpose(electrode_data, (2, 3, 0, 1)).reshape(-1, n_channels, n_timepoints)
        # Labels cycle 0..11 once per block.
        labels = np.tile(np.arange(self.n_targets), self.n_blocks)

        print(f"提取试验数据完成:")
        print(f"  电极类型: {'湿电极' if electrode_type == 1 else '干电极'}")
        print(f"  试验数据形状: {trials_data.shape}")
        print(f"  标签形状: {labels.shape}")
        print(f"  标签分布: {np.bincount(labels)}")

        return trials_data, labels

    def split_train_test(self, trials_data: np.ndarray, labels: np.ndarray,
                        train_blocks: Optional[List[int]] = None,
                        test_blocks: Optional[List[int]] = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Split trials into train/test sets by block.

        Parameters:
        trials_data: trial array [n_trials, n_channels, n_timepoints], in the
            block-major order produced by extract_trials.
        labels: label array [n_trials].
        train_blocks: training block indices; default [0..7].
        test_blocks: test block indices; default [8, 9].

        Returns:
        (X_train, X_test, y_train, y_test).
        """
        if train_blocks is None:
            train_blocks = list(range(8))  # first 8 blocks for training
        if test_blocks is None:
            test_blocks = list(range(8, 10))  # last 2 blocks for testing

        # Each block contributes one trial per target.
        trials_per_block = self.n_targets

        train_indices: List[int] = []
        test_indices: List[int] = []

        for block_idx in range(self.n_blocks):
            block_start = block_idx * trials_per_block
            block_indices = list(range(block_start, block_start + trials_per_block))

            if block_idx in train_blocks:
                train_indices.extend(block_indices)
            elif block_idx in test_blocks:
                test_indices.extend(block_indices)
            # Blocks in neither list are silently dropped (by design).

        X_train = trials_data[train_indices]
        X_test = trials_data[test_indices]
        y_train = labels[train_indices]
        y_test = labels[test_indices]

        print(f"数据分割完成:")
        print(f"  训练区块: {train_blocks}")
        print(f"  测试区块: {test_blocks}")
        print(f"  训练数据: {X_train.shape}, 标签: {y_train.shape}")
        print(f"  测试数据: {X_test.shape}, 标签: {y_test.shape}")

        return X_train, X_test, y_train, y_test

    def load_impedance_data(self) -> Optional[np.ndarray]:
        """Load electrode impedance measurements (Impedance.mat).

        Returns:
        Array of shape [8, 10, 2, 102] (channels, blocks, electrode types,
        subjects), or None if the file or a matching array is not found.
        """
        filepath = os.path.join(self.dataset_path, "Impedance.mat")

        if not os.path.exists(filepath):
            print(f"警告: 阻抗文件不存在 {filepath}")
            return None

        try:
            mat_data = scipy.io.loadmat(filepath)

            # The variable name inside the .mat file is unknown, so scan for
            # an array with the expected dimensions (was hard-coded (8,10,2,102)).
            expected_shape = (self.n_channels, self.n_blocks,
                              self.n_electrode_types, self.n_subjects)
            impedance_key = None
            for key in mat_data.keys():
                if not key.startswith('__') and isinstance(mat_data[key], np.ndarray):
                    if mat_data[key].shape == expected_shape:
                        impedance_key = key
                        break

            if impedance_key is None:
                print("警告: 未找到正确维度的阻抗数据")
                return None

            impedance_data = mat_data[impedance_key]
            print(f"成功加载阻抗数据，形状: {impedance_data.shape}")
            return impedance_data

        except Exception as e:
            print(f"加载阻抗数据失败: {e}")
            return None

    def load_subject_info(self) -> Optional[np.ndarray]:
        """Load demographic/subject metadata (Subjects_Information.mat).

        Returns:
        The first non-private ndarray found in the file, or None on failure.
        """
        filepath = os.path.join(self.dataset_path, "Subjects_Information.mat")

        if not os.path.exists(filepath):
            print(f"警告: 受试者信息文件不存在 {filepath}")
            return None

        try:
            mat_data = scipy.io.loadmat(filepath)

            # Take the first real data array, skipping MATLAB's __header__ etc.
            info_key = None
            for key in mat_data.keys():
                if not key.startswith('__') and isinstance(mat_data[key], np.ndarray):
                    info_key = key
                    break

            if info_key is None:
                print("警告: 未找到受试者信息数据")
                return None

            subject_info = mat_data[info_key]
            print(f"成功加载受试者信息，形状: {subject_info.shape}")
            return subject_info

        except Exception as e:
            print(f"加载受试者信息失败: {e}")
            return None

    def get_stimulus_window_data(self, trials_data: np.ndarray,
                               baseline_correct: bool = True) -> np.ndarray:
        """Slice out the stimulation window, optionally baseline-corrected.

        Assumes trials_data starts at epoch time 0 (i.e. contains the full
        0.5 s baseline) -- do not feed it trials already cropped by a
        time_window.

        Parameters:
        trials_data: trial array [n_trials, n_channels, n_timepoints].
        baseline_correct: subtract the per-channel mean of the 0.5 s baseline.

        Returns:
        stimulus_data: array [n_trials, n_channels, stimulus_samples].
        """
        # Stimulation window: starts after the 0.5 s baseline, lasts 2 s.
        stimulus_start = self.baseline_samples
        stimulus_end = stimulus_start + self.stimulus_samples

        stimulus_data = trials_data[:, :, stimulus_start:stimulus_end]

        if baseline_correct:
            # Baseline = mean over the first 0.5 s, per trial and channel.
            baseline = np.mean(trials_data[:, :, :self.baseline_samples], axis=2, keepdims=True)
            stimulus_data = stimulus_data - baseline
            print("已应用基线校正")

        print(f"提取刺激窗口数据: {stimulus_data.shape}")
        return stimulus_data

    def batch_load_subjects(self, subject_ids: List[int], electrode_type: int = 1,
                           time_window: Optional[Tuple[float, float]] = None) -> Tuple[np.ndarray, np.ndarray, List[int]]:
        """Load and concatenate trials from several subjects.

        Subjects whose files fail to load are skipped with a message.

        Parameters:
        subject_ids: subject numbers to load.
        electrode_type: electrode type passed to extract_trials.
        time_window: optional time window passed to extract_trials.

        Returns:
        all_trials: concatenated trials [n_total_trials, n_channels, n_timepoints]
            (empty array if nothing loaded).
        all_labels: concatenated labels (empty array if nothing loaded).
        valid_subjects: IDs of subjects that loaded successfully.
        """
        all_trials = []
        all_labels = []
        valid_subjects = []

        for subject_id in subject_ids:
            print(f"加载受试者 {subject_id}...")
            subject_data = self.load_subject_data(subject_id)

            if subject_data is not None:
                trials, labels = self.extract_trials(subject_data, electrode_type, time_window)
                all_trials.append(trials)
                all_labels.append(labels)
                valid_subjects.append(subject_id)
            else:
                print(f"跳过受试者 {subject_id}")

        if len(all_trials) > 0:
            all_trials = np.concatenate(all_trials, axis=0)
            all_labels = np.concatenate(all_labels, axis=0)

            print(f"批量加载完成:")
            print(f"  有效受试者: {len(valid_subjects)}")
            print(f"  总试验数: {all_trials.shape[0]}")
            print(f"  数据形状: {all_trials.shape}")

            return all_trials, all_labels, valid_subjects
        else:
            print("未成功加载任何受试者数据")
            return np.array([]), np.array([]), []


def main():
    """Smoke-test the loader: subject 1's data, then the metadata files."""
    loader = SSVEP102DatasetLoader()

    banner = "=" * 50
    print(banner)
    print("测试加载受试者1数据...")
    raw = loader.load_subject_data(1)

    if raw is not None:
        # Wet electrodes, stimulation window 0.5-2.5 s.
        trials, labels = loader.extract_trials(
            raw,
            electrode_type=1,
            time_window=(0.5, 2.5),
        )

        # Block-wise train/test split (defaults: blocks 0-7 vs 8-9).
        X_train, X_test, y_train, y_test = loader.split_train_test(trials, labels)

        # Baseline-corrected stimulation-window slice of the same trials.
        stimulus_data = loader.get_stimulus_window_data(trials, baseline_correct=True)

        print(f"最终数据摘要:")
        print(f"  刺激窗口数据: {stimulus_data.shape}")
        print(f"  训练数据: {X_train.shape}")
        print(f"  测试数据: {X_test.shape}")

    print(banner)
    print("测试加载阻抗数据和受试者信息...")
    loader.load_impedance_data()
    loader.load_subject_info()


# Run the smoke test only when executed as a script, not on import.
if __name__ == "__main__":
    main()
