import torch
import torch.nn as nn
import torch.nn.functional as F


class DualPathDiscriminator(nn.Module):
    """Dual-path CNN discriminator for 1-D EEG signals.

    Two convolutional branches extract complementary features:

    * a time-domain branch over the raw waveform, and
    * a frequency-domain branch over spectral features (magnitude, phase,
      power, and spectral entropy) derived from an FFT of the input.

    The two feature vectors are concatenated and fed to an adversarial
    real/fake head and, optionally, an auxiliary BIS regression head.
    """

    def __init__(self, input_dim=256, hidden_dim=64, bis_prediction=True):
        """
        Args:
            input_dim: length of the input EEG sequence.
            hidden_dim: base channel width; deeper layers use multiples of it.
            bis_prediction: whether to build the auxiliary BIS prediction branch.
        """
        super(DualPathDiscriminator, self).__init__()

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.bis_prediction = bis_prediction

        # Time-domain branch: strided convolutions progressively downsample
        # the raw waveform. The first layer has no BatchNorm (common GAN
        # discriminator practice, cf. DCGAN).
        self.time_path = nn.Sequential(
            # input: (batch_size, 1, input_dim)
            nn.Conv1d(1, hidden_dim, kernel_size=15, stride=2, padding=7),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.Conv1d(hidden_dim, hidden_dim * 2, kernel_size=11, stride=2, padding=5),
            nn.BatchNorm1d(hidden_dim * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.Conv1d(hidden_dim * 2, hidden_dim * 4, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm1d(hidden_dim * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.Conv1d(hidden_dim * 4, hidden_dim * 8, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm1d(hidden_dim * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.AdaptiveAvgPool1d(1)  # global average pooling -> length 1
        )

        # Frequency-domain branch: operates on the 4 stacked spectral
        # channels produced by extract_spectral_features().
        self.freq_path = nn.Sequential(
            # input: (batch_size, 4, input_dim // 2 + 1)
            # channels: [magnitude, phase, power, spectral entropy]
            nn.Conv1d(4, hidden_dim, kernel_size=7, stride=2, padding=3),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.Conv1d(hidden_dim, hidden_dim * 2, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm1d(hidden_dim * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.Conv1d(hidden_dim * 2, hidden_dim * 4, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm1d(hidden_dim * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.3),

            nn.AdaptiveAvgPool1d(1)  # global average pooling -> length 1
        )

        # Width of the concatenated feature vector fed to the heads:
        # time-domain features + frequency-domain features.
        fusion_dim = hidden_dim * 8 + hidden_dim * 4

        # Adversarial head: real/fake probability.
        self.adversarial_net = nn.Sequential(
            nn.Linear(fusion_dim, hidden_dim * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5),

            nn.Linear(hidden_dim * 4, hidden_dim * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(0.5),

            nn.Linear(hidden_dim * 2, 1),
            nn.Sigmoid()  # probability in [0, 1]
        )

        # Auxiliary BIS regression head (optional).
        if self.bis_prediction:
            self.bis_pred_net = nn.Sequential(
                nn.Linear(fusion_dim, hidden_dim * 4),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Dropout(0.5),

                nn.Linear(hidden_dim * 4, hidden_dim * 2),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Dropout(0.5),

                nn.Linear(hidden_dim * 2, 1),
                nn.Sigmoid()  # BIS is clinically 0-100; normalized to [0, 1] here
            )

        self._initialize_weights()

    def _initialize_weights(self):
        """DCGAN-style initialization: N(0, 0.02) weights, zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.normal_(m.weight, 0.0, 0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.normal_(m.weight, 1.0, 0.02)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0.0, 0.02)
                nn.init.constant_(m.bias, 0)

    def extract_spectral_features(self, x):
        """Compute spectral features of the raw signal.

        Args:
            x: input signal, shape (batch_size, 1, seq_len).

        Returns:
            Tensor of shape (batch_size, 4, seq_len // 2 + 1) stacking the
            magnitude spectrum, phase spectrum, power spectrum, and the
            per-sample spectral entropy broadcast across frequency bins.
        """
        _, _, seq_len = x.shape
        freq_bins = seq_len // 2 + 1

        # rfft of a real signal returns exactly the non-negative-frequency
        # bins (seq_len // 2 + 1 of them), so no slicing is needed and the
        # redundant negative-frequency half is never computed.
        x_fft = torch.fft.rfft(x, dim=-1)

        magnitude = torch.abs(x_fft)     # (batch_size, 1, freq_bins)
        phase = torch.angle(x_fft)       # (batch_size, 1, freq_bins)
        power = magnitude ** 2           # (batch_size, 1, freq_bins)

        # Spectral entropy of the normalized power spectrum; the epsilon
        # guards against division by zero and log(0).
        power_norm = power / (torch.sum(power, dim=-1, keepdim=True) + 1e-8)
        spectral_entropy = -torch.sum(power_norm * torch.log(power_norm + 1e-8), dim=-1)
        # spectral_entropy: (batch_size, 1) -> broadcast to (batch_size, 1, freq_bins)
        # so it can be stacked as a fourth channel.
        spectral_entropy = spectral_entropy.unsqueeze(-1).expand(-1, -1, freq_bins)

        return torch.cat([magnitude, phase, power, spectral_entropy], dim=1)

    def forward(self, x):
        """
        Args:
            x: input EEG signal, shape (batch_size, 1, input_dim).

        Returns:
            adversarial_out: real/fake probability, shape (batch_size, 1).
            bis_pred: BIS prediction in [0, 100] if enabled, else None;
                shape (batch_size, 1).
        """
        batch_size = x.shape[0]

        # Time-domain branch.
        time_features = self.time_path(x)                   # (batch_size, hidden_dim*8, 1)
        time_features = time_features.view(batch_size, -1)  # (batch_size, hidden_dim*8)

        # Frequency-domain branch.
        spectral_features = self.extract_spectral_features(x)  # (batch_size, 4, freq_bins)
        freq_features = self.freq_path(spectral_features)      # (batch_size, hidden_dim*4, 1)
        freq_features = freq_features.view(batch_size, -1)     # (batch_size, hidden_dim*4)

        # Fuse both branches.
        fused_features = torch.cat([time_features, freq_features], dim=1)

        adversarial_out = self.adversarial_net(fused_features)

        bis_pred = None
        if self.bis_prediction:
            # Scale the sigmoid output back to the clinical 0-100 BIS range.
            bis_pred = self.bis_pred_net(fused_features) * 100

        return adversarial_out, bis_pred


# 1-D U-Net generator: noise + BIS conditioning -> synthetic EEG signal.
class UNetGenerator1D(nn.Module):
    """U-Net style one-dimensional EEG signal generator.

    The concatenated noise/BIS vector is broadcast to a short sequence,
    passed through a strided-conv encoder, a bottleneck, and a
    transposed-conv decoder with skip connections, then upsampled to the
    target signal length.
    """

    def __init__(self, noise_dim=128, bis_dim=1, output_dim=256, hidden_dim=64):
        """
        Args:
            noise_dim: dimensionality of the input noise vector.
            bis_dim: dimensionality of the BIS conditioning vector.
            output_dim: length of the generated EEG signal.
            hidden_dim: base channel width; deeper layers use multiples of it.
        """
        super(UNetGenerator1D, self).__init__()

        self.noise_dim = noise_dim
        self.bis_dim = bis_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim

        # Encoder (downsampling): lengths 32 -> 32 -> 16 -> 8 -> 4.
        self.enc1 = self._encoder_block(noise_dim + bis_dim, hidden_dim, kernel_size=7, stride=1, padding=3)
        self.enc2 = self._encoder_block(hidden_dim, hidden_dim * 2, kernel_size=5, stride=2, padding=2)
        self.enc3 = self._encoder_block(hidden_dim * 2, hidden_dim * 4, kernel_size=5, stride=2, padding=2)
        self.enc4 = self._encoder_block(hidden_dim * 4, hidden_dim * 8, kernel_size=5, stride=2, padding=2)

        # Bottleneck at the coarsest resolution (length 4).
        self.bottleneck = nn.Sequential(
            nn.Conv1d(hidden_dim * 8, hidden_dim * 8, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim * 8),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden_dim * 8, hidden_dim * 8, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim * 8),
            nn.ReLU(inplace=True)
        )

        # Decoder (upsampling); input channels are doubled by the skip
        # connections concatenated in forward().
        self.dec1 = self._decoder_block(hidden_dim * 16, hidden_dim * 4, kernel_size=5, stride=2, padding=2,
                                        output_padding=1)
        self.dec2 = self._decoder_block(hidden_dim * 8, hidden_dim * 2, kernel_size=5, stride=2, padding=2,
                                        output_padding=1)
        self.dec3 = self._decoder_block(hidden_dim * 4, hidden_dim, kernel_size=5, stride=2, padding=2,
                                        output_padding=1)

        # Final refinement after upsampling to output_dim.
        self.final_conv = nn.Sequential(
            nn.Conv1d(hidden_dim * 2, hidden_dim, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden_dim, 1, kernel_size=7, padding=3),
            nn.Tanh()  # output constrained to [-1, 1]
        )

        self._initialize_weights()

    def _encoder_block(self, in_channels, out_channels, kernel_size, stride, padding):
        """Two-conv encoder block; the first conv applies the stride."""
        return nn.Sequential(
            nn.Conv1d(in_channels, out_channels, kernel_size, stride, padding),
            nn.BatchNorm1d(out_channels),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv1d(out_channels, out_channels, kernel_size, 1, padding),
            nn.BatchNorm1d(out_channels),
            nn.LeakyReLU(0.2, inplace=True)
        )

    def _decoder_block(self, in_channels, out_channels, kernel_size, stride, padding, output_padding):
        """Transposed-conv upsampling followed by a refining conv."""
        return nn.Sequential(
            nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride,
                               padding, output_padding),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm1d(out_channels),
            nn.ReLU(inplace=True)
        )

    def _initialize_weights(self):
        """DCGAN-style initialization: N(0, 0.02) weights, zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d):
                nn.init.normal_(m.weight, 0.0, 0.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.normal_(m.weight, 1.0, 0.02)
                nn.init.constant_(m.bias, 0)

    def forward(self, noise, bis):
        """
        Args:
            noise: input noise, shape (batch_size, noise_dim).
            bis: BIS conditioning, shape (batch_size, bis_dim) (or any
                shape reshapeable to it).

        Returns:
            Generated EEG signal, shape (batch_size, 1, output_dim),
            with values in [-1, 1].
        """
        # Concatenate noise and BIS conditioning along the feature axis.
        bis = bis.reshape(-1, self.bis_dim)
        x = torch.cat([noise, bis], dim=1)  # (batch_size, noise_dim + bis_dim)

        # Treat the fused vector as channels of a length-1 sequence and
        # stretch it to the U-Net's working length of 32; the final
        # interpolation below brings it to output_dim.
        x = x.unsqueeze(-1)  # (batch_size, noise_dim + bis_dim, 1)
        x = F.interpolate(x, size=32, mode='linear', align_corners=False)

        # Encoder path.
        enc1 = self.enc1(x)     # (batch_size, hidden_dim, 32)
        enc2 = self.enc2(enc1)  # (batch_size, hidden_dim*2, 16)
        enc3 = self.enc3(enc2)  # (batch_size, hidden_dim*4, 8)
        enc4 = self.enc4(enc3)  # (batch_size, hidden_dim*8, 4)

        bottleneck = self.bottleneck(enc4)  # (batch_size, hidden_dim*8, 4)

        # Decoder path with skip connections.
        dec1 = self.dec1(torch.cat([bottleneck, enc4], dim=1))  # (batch_size, hidden_dim*4, 8)
        dec2 = self.dec2(torch.cat([dec1, enc3], dim=1))        # (batch_size, hidden_dim*2, 16)
        dec3 = self.dec3(torch.cat([dec2, enc2], dim=1))        # (batch_size, hidden_dim, 32)

        # Upsample to the target signal length.
        x = torch.cat([dec3, enc1], dim=1)  # (batch_size, hidden_dim*2, 32)
        x = F.interpolate(x, size=self.output_dim, mode='linear', align_corners=False)

        return self.final_conv(x)  # (batch_size, 1, output_dim)


# Smoke test: build both networks and push one batch through each.
if __name__ == "__main__":
    # Configuration for the quick shape check.
    n_samples = 4
    latent_dim = 128
    bis_channels = 1
    signal_len = 256

    # Instantiate generator and discriminator with matching signal length.
    gen = UNetGenerator1D(
        noise_dim=latent_dim,
        bis_dim=bis_channels,
        output_dim=signal_len,
        hidden_dim=64,
    )
    disc = DualPathDiscriminator(
        input_dim=signal_len,
        hidden_dim=64,
        bis_prediction=True,
    )

    print(f"Generator parameters: {sum(p.numel() for p in gen.parameters()):,}")
    print(f"Discriminator parameters: {sum(p.numel() for p in disc.parameters()):,}")

    # Generator: noise + BIS conditioning -> synthetic EEG.
    z = torch.randn(n_samples, latent_dim)
    bis_cond = torch.randn(n_samples, bis_channels)
    with torch.no_grad():
        fake_eeg = gen(z, bis_cond)

    print(f"Generated EEG shape: {fake_eeg.shape}")

    # Discriminator: real/fake score plus optional BIS estimate.
    with torch.no_grad():
        adv_score, bis_est = disc(fake_eeg)

    print(f"Adversarial output shape: {adv_score.shape}")
    print(f"BIS prediction shape: {bis_est.shape if bis_est is not None else 'None'}")
    print(f"Adversarial output range: [{adv_score.min():.3f}, {adv_score.max():.3f}]")
    if bis_est is not None:
        print(f"BIS prediction range: [{bis_est.min():.3f}, {bis_est.max():.3f}]")