import numpy as np
import scipy.io
import os
import re

import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
from models.gan_augmenter import CSIGAN


class CSIDataset(Dataset):
    """Torch dataset of CSI feature tensors with integer activity labels.

    When ``config.use_gan_augment`` is true, synthetic samples for the
    under-represented 'fall' class (label 10) are generated with a CSIGAN
    and appended to the dataset in place.
    """

    def __init__(self, features, labels, config):
        self.features = features  # np.ndarray: one feature tensor per sample
        self.labels = labels      # np.ndarray of int class ids (0..10)
        self.config = config

        # Optional GAN-based data augmentation (affects only the 'fall' class).
        if config.use_gan_augment:
            self.augment_with_gan()

    def augment_with_gan(self):
        """Train a CSIGAN on the 'fall' class and append generated samples."""
        target_class = 10  # 'fall'
        # minlength guards against IndexError when this split happens to
        # contain no label >= target_class.
        class_counts = np.bincount(self.labels, minlength=target_class + 1)
        target_count = class_counts[target_class]

        if target_count == 0:
            return

        # Number of extra samples requested by the augmentation ratio.
        augment_num = int(target_count * self.config.gan_augment_ratio) - target_count
        if augment_num <= 0:
            # A ratio <= 1 requests no extra samples; skip the costly GAN training.
            return

        # Train the GAN on the current split.
        gan = CSIGAN(self.config)
        features_tensor = torch.tensor(self.features, dtype=torch.float32)
        labels_tensor = torch.tensor(self.labels, dtype=torch.long)

        print(f"\n==== 训练GAN以增强类别{target_class}(fall) ====")
        for epoch in range(self.config.gan_num_epochs):
            d_loss, g_loss = gan.train(features_tensor, labels_tensor, target_class)
            if epoch % 10 == 0:
                print(f"GAN Epoch {epoch}: D_loss={d_loss:.4f}, G_loss={g_loss:.4f}")

        # Generate synthetic samples.
        # NOTE(review): augment_num is never passed to generate_samples — the
        # sample count appears to be decided inside CSIGAN; confirm that it
        # honours gan_augment_ratio.
        fake_samples = gan.generate_samples(features_tensor, labels_tensor, target_class)
        if fake_samples is not None:
            fake_samples = fake_samples.cpu().numpy()
            fake_labels = np.full(len(fake_samples), target_class)

            # Append the synthetic samples to this dataset in place.
            self.features = np.concatenate([self.features, fake_samples])
            self.labels = np.concatenate([self.labels, fake_labels])
            print(f"已生成{len(fake_samples)}个fall样本")

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]


def load_csi_from_mat(mat_file_path):
    """Read the 'csi' variable from a MATLAB .mat file as complex64."""
    contents = scipy.io.loadmat(mat_file_path)
    csi = contents['csi']
    return csi.astype(np.complex64)


def parse_filename(filename):
    """Extract (volunteer, activity) from a name like 'Alice_walk_01.mat'.

    Returns (None, None) when the filename does not match the expected
    '<Volunteer>_<activity>' pattern, instead of raising AttributeError
    on the failed match.
    """
    match = re.match(r'([A-Za-z]+)_([a-z]+)', filename)
    if match is None:
        return None, None
    return match.group(1), match.group(2)


def prepare_features(csi_list, feature_type="enhanced"):
    """Convert complex CSI arrays into stacked real-valued feature tensors.

    "enhanced" yields 4 channels per sample: log-amplitude, phase scaled to
    [-1, 1], and their first-order differences along axis 1 (edge-padded back
    to the original size). Any other value yields 2 channels: raw amplitude
    and raw phase. Returns a float32 array with samples along axis 0.
    """
    out = []
    for sample in csi_list:
        magnitude = np.abs(sample)
        angle = np.angle(sample)

        if feature_type != "enhanced":
            # Basic features: raw amplitude/phase channels.
            out.append(np.stack([magnitude, angle]))
            continue

        # Compressed amplitude and phase normalized by pi.
        log_amp = np.log1p(magnitude)
        norm_phase = angle / np.pi

        # Temporal differences along axis 1, edge-padded to keep the shape.
        pad_spec = ((0, 0), (0, 1), (0, 0))
        d_amp = np.pad(np.diff(log_amp, axis=1), pad_spec, mode='edge')
        d_phase = np.pad(np.diff(norm_phase, axis=1), pad_spec, mode='edge')

        # Combined feature channels, e.g. [4, 3, 30, 200].
        out.append(np.stack([log_amp, norm_phase, d_amp, d_phase]))

    return np.array(out, dtype=np.float32)


def load_dataset(config):
    """Load CSI .mat files, build feature tensors, and return train/test sets.

    Walks config.data_dir/{bedroom,meetingroom}/<action>/*.mat, parses each
    filename as '<Volunteer>_<activity>', maps activities to integer labels,
    and returns (train_dataset, test_dataset) as CSIDataset instances after
    a stratified split.
    """
    csi_data, metadata = [], []
    # Activity name -> integer class label.
    activity_dict = {
        'lying': 0, 'sitting': 1, 'run': 2, 'walk': 3, 'pickup': 4,
        'wave': 5, 'jump': 6, 'squat': 7, 'sitdown': 8, 'standup': 9, 'fall': 10
    }

    for env in ['bedroom', 'meetingroom']:
        env_path = os.path.join(config.data_dir, env)
        if not os.path.isdir(env_path): continue

        for action_dir in os.listdir(env_path):
            action_path = os.path.join(env_path, action_dir)
            if not os.path.isdir(action_path): continue

            for filename in os.listdir(action_path):
                if not filename.endswith('.mat'): continue

                try:
                    csi = load_csi_from_mat(os.path.join(action_path, filename))
                    volunteer, activity = parse_filename(filename)
                    # Skip files whose activity is unknown or unparseable.
                    if activity not in activity_dict: continue

                    csi_data.append(csi)
                    metadata.append(activity_dict[activity])
                except Exception as e:
                    # Best-effort loading: skip unreadable files, but report
                    # which file failed (the original printed "(unknown)").
                    print(f"Error loading {filename}: {str(e)}")

    features = prepare_features(csi_data, config.feature_type)
    labels = np.array(metadata)

    # Stratified split preserves the per-class proportions in both halves.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels,
        test_size=config.test_size,
        random_state=config.random_state,
        stratify=labels
    )

    # Wrap in the custom dataset class (may GAN-augment the train split).
    train_dataset = CSIDataset(X_train, y_train, config)
    test_dataset = CSIDataset(X_test, y_test, config)

    return train_dataset, test_dataset