import os
import numpy as np
import pandas as pd
from scipy import signal
import mne
import torch

class EEGDataAugmentation:
    """Random data augmentation for EEG arrays shaped (..., time_points)."""

    def __init__(self, noise_factor=0.02, shift_limit=0.05):
        """
        EEG data augmentation.

        Parameters:
        - noise_factor: standard deviation of the additive Gaussian noise
        - shift_limit: maximum temporal shift as a fraction of signal length
        """
        self.noise_factor = noise_factor
        self.shift_limit = shift_limit

    def add_noise(self, data):
        """Return data plus zero-mean Gaussian noise (std = noise_factor)."""
        noise = np.random.normal(0.0, self.noise_factor, data.shape)
        return data + noise

    def time_shift(self, data):
        """Circularly shift the signal along the time axis by a random amount.

        The shift magnitude is drawn from [1, max_shift], where
        max_shift = floor(time_points * shift_limit), with random direction.
        Returns the input unchanged when max_shift is 0.
        """
        max_shift = int(data.shape[-1] * self.shift_limit)
        if max_shift <= 0:
            return data
        direction = np.random.choice([-1, 1])
        # randint's upper bound is exclusive: the original randint(1, max_shift)
        # raised ValueError when max_shift == 1 and could never draw the full
        # max_shift. Use max_shift + 1 so the range is [1, max_shift].
        shift_value = np.random.randint(1, max_shift + 1)
        return np.roll(data, direction * shift_value, axis=-1)

    def apply_augmentation(self, data, augment_prob=0.5):
        """Apply each augmentation independently with probability augment_prob.

        Parameters:
        - data: array of shape (trials, channels, time_points)
        - augment_prob: probability of applying each augmentation

        Returns a copy; the input array is never modified.
        """
        augmented_data = data.copy()

        # Randomly apply Gaussian noise.
        if np.random.random() < augment_prob:
            augmented_data = self.add_noise(augmented_data)

        # Randomly apply a circular time shift.
        if np.random.random() < augment_prob:
            augmented_data = self.time_shift(augmented_data)

        return augmented_data

def process_eeg_data(data, sfreq=250, augment=False):
    """
    Full preprocessing pipeline for epoched EEG data.

    Per trial: 4-40 Hz IIR band-pass (MNE), 50 Hz power-line notch
    (scipy iirnotch + zero-phase filtfilt), per-channel z-scoring, and
    optional random augmentation.

    Parameters:
    - data: array of shape (trials, channels, time_points)
    - sfreq: sampling frequency in Hz, default 250
    - augment: apply EEGDataAugmentation when True

    Returns:
    - processed_data: array of the same shape as data; trials whose
      preprocessing raised an exception are left as zeros.
    """
    processed_data = np.zeros_like(data)
    augmenter = EEGDataAugmentation() if augment else None

    # Trial-invariant setup hoisted out of the loop.
    n_channels = data.shape[1]
    ch_names = [f'EEG{i + 1}' for i in range(n_channels)]
    info = mne.create_info(ch_names=ch_names, sfreq=sfreq,
                           ch_types=['eeg'] * n_channels)
    # 50 Hz notch filter: iirnotch takes the frequency normalized to
    # Nyquist when no fs is given; Q = 30 sets the notch bandwidth.
    b, a = signal.iirnotch(50 / (sfreq / 2), 30)

    for trial in range(data.shape[0]):
        # 1. Build an MNE Raw object. Copy the trial explicitly:
        # RawArray's copy='auto' may reuse the input buffer, in which case
        # the in-place filter below would mutate the caller's array.
        raw = mne.io.RawArray(data[trial].copy(), info)

        # 2. Band-pass 4-40 Hz (typical motor-imagery band).
        raw.filter(l_freq=4, h_freq=40, method='iir')

        try:
            processed = raw.get_data()

            # Zero-phase 50 Hz notch, channel by channel.
            for ch in range(processed.shape[0]):
                processed[ch] = signal.filtfilt(b, a, processed[ch])

            # Per-channel z-score; epsilon guards against flat channels.
            for ch in range(processed.shape[0]):
                ch_mean = np.mean(processed[ch])
                ch_std = np.std(processed[ch])
                processed[ch] = (processed[ch] - ch_mean) / (ch_std + 1e-10)

            # Optional data augmentation.
            if augmenter is not None:
                processed = augmenter.apply_augmentation(processed)

            processed_data[trial] = processed

        except Exception as e:
            print(f"Error in trial {trial}: {e}")
            continue

    return processed_data

def get_subject_id(filename):
    """Extract the numeric subject ID from a data file name.

    File names look like "MI-EEG-A01T.csv" or "MI-EEG-A01E.csv"; the two
    digits following the leading letter of the last '-' field give the
    subject number (e.g. "01" -> 1).
    """
    tail = filename.rsplit('-', 1)[-1]  # e.g. "A01T.csv"
    return int(tail[1:3])

def _load_split(data_folder, label_folder, suffix, tag, augment, verbose):
    """Load, reshape, preprocess and baseline-correct one data split.

    Parameters:
    - data_folder / label_folder: directories holding the CSV data files
      and the matching "Etiquetas*" label files
    - suffix: file-name suffix selecting the split ('T.csv' or 'E.csv')
    - tag: split name used only in log messages ('training' / 'test')
    - augment: forwarded to process_eeg_data
    - verbose: print extra per-file diagnostics when True

    Returns:
    - (data_list, labels_list, pid_list): one processed array and one
      label array per file, plus one subject ID per trial.
    """
    data_list, labels_list, pid_list = [], [], []
    n_channels, time_points = 22, 1000  # 22 channels x 1000 samples per trial

    for file in sorted(f for f in os.listdir(data_folder) if f.endswith(suffix)):
        subject_id = get_subject_id(file)
        print(f"Processing {tag} file: {file}, Subject ID: {subject_id}")

        # Read data and labels; map class labels 1-4 -> 0-3.
        data = pd.read_csv(os.path.join(data_folder, file))
        labels = pd.read_csv(os.path.join(label_folder,
                                          f"Etiquetas{file.split('-')[-1]}"))
        labels = labels.values.flatten() - 1

        # Ensure labels stay in the expected range.
        if not np.all((labels >= 0) & (labels <= 3)):
            print(f"Warning: Invalid label values found in {file}")
            labels = np.clip(labels, 0, 3)

        if verbose:
            print(f"Label values range: {np.min(labels)} to {np.max(labels)}")
            print(f"Data shape: {data.values.shape}")
            print(f"Number of trials (labels): {len(labels)}")

        data_array = data.values
        n_trials = len(labels)
        if verbose:
            print(f"Reshaping to: ({n_trials}, {n_channels}, {time_points})")

        try:
            # Each CSV row stores one trial as 22 contiguous 1000-sample
            # channel blocks, so a single reshape replaces the original
            # per-channel element-copy loop. astype(float) also guarantees
            # a fresh writable buffer independent of data_array.
            reshaped_data = (
                data_array[:n_trials, :n_channels * time_points]
                .reshape(n_trials, n_channels, time_points)
                .astype(float)
            )
            if verbose:
                print(f"Reshaped data shape: {reshaped_data.shape}")

            processed_data = process_eeg_data(reshaped_data, augment=augment)

            # Baseline correction: subtract the mean of the first 100 samples.
            baseline_mean = np.mean(processed_data[..., :100],
                                    axis=-1, keepdims=True)
            processed_data = processed_data - baseline_mean

            data_list.append(processed_data)
            labels_list.append(labels)
            pid_list.extend([subject_id] * len(labels))

        except Exception as e:
            print(f"Error processing data: {e}")
            continue

    return data_list, labels_list, pid_list


def load_and_process_data():
    """End-to-end pipeline: load train/test CSVs, preprocess each split,
    normalize per channel, and save x.npy / y.npy / pid.npy to save_root."""
    # Paths (fixed deployment layout).
    train_data_folder = '/root/autodl-tmp/train'
    train_label_folder = '/root/autodl-tmp/train_label'
    test_data_folder = '/root/autodl-tmp/test'
    test_label_folder = '/root/autodl-tmp/test_label'
    save_root = '/root/autodl-tmp/datas'
    os.makedirs(save_root, exist_ok=True)

    # Training files end in 'T.csv': augmentation on, extra logging on.
    train_data_list, train_labels_list, train_pid_list = _load_split(
        train_data_folder, train_label_folder, 'T.csv',
        tag='training', augment=True, verbose=True)

    # Evaluation files end in 'E.csv': no augmentation.
    test_data_list, test_labels_list, test_pid_list = _load_split(
        test_data_folder, test_label_folder, 'E.csv',
        tag='test', augment=False, verbose=False)

    # Merge per-file arrays.
    print("\nMerging training data...")
    X_train = np.vstack(train_data_list) if train_data_list else np.array([])
    y_train = np.concatenate(train_labels_list) if train_labels_list else np.array([])
    pid_train = np.array(train_pid_list)

    print("\nMerging test data...")
    X_test = np.vstack(test_data_list) if test_data_list else np.array([])
    y_test = np.concatenate(test_labels_list) if test_labels_list else np.array([])
    pid_test = np.array(test_pid_list)

    if len(X_train) == 0 or len(X_test) == 0:
        print("Error: No data was successfully processed!")
        return

    # Final per-channel standardization across all trials.
    # NOTE(review): train and test are normalized together here, so test-set
    # statistics leak into the training data — confirm this is intentional.
    print("\nNormalizing data...")
    X = np.concatenate((X_train, X_test))
    print("X shape after concatenation:", X.shape)

    for ch in range(X.shape[1]):
        ch_mean = np.mean(X[:, ch, :])
        ch_std = np.std(X[:, ch, :])
        # Epsilon prevents division by zero on a flat channel, matching the
        # per-trial normalization inside process_eeg_data.
        X[:, ch, :] = (X[:, ch, :] - ch_mean) / (ch_std + 1e-10)

    # Merge labels and subject IDs.
    y = np.concatenate((y_train, y_test))
    pid = np.concatenate((pid_train, pid_test))

    print("\nData shape before saving:")
    print("X shape:", X.shape)
    print("y shape:", y.shape)
    print("pid shape:", pid.shape)

    # Persist the processed dataset.
    print("\nSaving data...")
    np.save(os.path.join(save_root, 'x.npy'), X)
    np.save(os.path.join(save_root, 'y.npy'), y)
    np.save(os.path.join(save_root, 'pid.npy'), pid)

    print("\nData saved to:", save_root)

# Run the full preprocessing pipeline when executed as a script.
if __name__ == "__main__":
    load_and_process_data() 