# new-model.py
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pystct import sdct_torch, isdct_torch
from noises import add_noise

class DoubleConv(nn.Module):
    """Two stacked (Conv2d 3x3 -> BatchNorm2d -> ReLU) stages.

    Kernel size 3 with padding 1 keeps the spatial resolution unchanged;
    channels go in_channels -> mid_channels -> out_channels.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # A falsy mid_channels (None or 0) defaults to the output width.
        mid_channels = mid_channels or out_channels
        stages = []
        for c_in, c_out in ((in_channels, mid_channels), (mid_channels, out_channels)):
            stages.extend([
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            ])
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both convolution stages to x."""
        return self.double_conv(x)

class Down(nn.Module):
    """Downscaling stage of the encoder path.

    A 2x2 max-pool halves the spatial resolution, then a DoubleConv
    (conv -> BN -> ReLU, twice) maps in_channels to out_channels.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool = nn.MaxPool2d(2)
        conv = DoubleConv(in_channels, out_channels)
        self.maxpool_conv = nn.Sequential(pool, conv)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Pool, then convolve."""
        return self.maxpool_conv(x)

class Up(nn.Module):
    """Upscaling stage with a skip connection.

    Doubles the spatial size of x1 — either bilinear upsampling followed by
    a 1x1 channel-reducing conv, or a single transposed convolution — pads
    it to match the skip tensor x2, concatenates the two along the channel
    axis, and applies a DoubleConv.
    """

    def __init__(self, in_channels: int, out_channels: int, bilinear: bool = True):
        super().__init__()
        self.bilinear = bilinear

        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            # Bilinear upsampling keeps the channel count, so a 1x1 conv
            # reduces in_channels down to out_channels afterwards.
            self.conv_reduce = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        else:
            self.up = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)

        # After concatenation with the skip tensor there are 2*out_channels.
        self.conv = DoubleConv(out_channels * 2, out_channels)

    def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:
        """Upsample x1, align it with x2, concatenate, and convolve."""
        x1 = self.up(x1)
        if self.bilinear:
            x1 = self.conv_reduce(x1)

        # Zero-pad x1 so its spatial size exactly matches the skip tensor.
        dy = x2.size(2) - x1.size(2)
        dx = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, (dx // 2, dx - dx // 2, dy // 2, dy - dy // 2))

        merged = torch.cat([x2, x1], dim=1)
        return self.conv(merged)

# NOTE(review): this refers to a ScalarMultiply class that is not defined
# anywhere in this file — presumably removed in an earlier revision; confirm
# whether it lives elsewhere and drop this comment if it is obsolete.

class PrepHidingNet(nn.Module):
    """U-Net that turns a secret spectrogram into a hidden residual signal.

    The C-channel input [N, C, H, W] is first interleaved into a single
    channel of widened width [N, 1, H, W*C], then passed through a small
    encoder/decoder with skip connections.
    """

    def __init__(self, n_channels: int, n_classes: int, bilinear: bool = False, transform: str = 'cosine'):
        super().__init__()
        self._transform = transform  # stored for callers; not used in forward()

        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

        self.inc = DoubleConv(n_channels, 64)

        # Encoder path.
        self.down_blocks = nn.ModuleList([
            Down(64, 128),
            Down(128, 256),
            Down(256, 512),
        ])

        # Decoder path (mirrors the encoder).
        self.up_blocks = nn.ModuleList([
            Up(512, 256, bilinear),
            Up(256, 128, bilinear),
            Up(128, 64, bilinear),
        ])

        self.outc = nn.Conv2d(64, n_classes, kernel_size=1)

    def forward(self, secret_audio: torch.Tensor) -> torch.Tensor:
        """Map an [N, C, H, W] secret to an [N, n_classes, H, W*C] signal."""
        N, C, H, W = secret_audio.shape

        # Fold the channel axis into the width axis:
        # [N, C, H, W] -> [N, H, W, C] -> [N, 1, H, W*C].
        # This is a genuine element rearrangement (channels interleaved
        # along the width), not merely a metadata-only view.
        x = secret_audio.permute(0, 2, 3, 1).contiguous().reshape(N, 1, H, W * C)

        x = self.inc(x)

        # Encoder: keep each resolution's features for the skip connections.
        skips = [x]
        for down in self.down_blocks:
            x = down(x)
            skips.append(x)

        # Decoder: the deepest entry of `skips` is x itself, so walk the
        # remaining entries from deep to shallow.
        for up, skip in zip(self.up_blocks, reversed(skips[:-1])):
            x = up(x, skip)

        return self.outc(x)


class RevealNet(nn.Module):
    """U-Net that recovers the hidden secret from a container spectrogram.

    Expects input shaped [N, 1, H, W] with W even; the output undoes the
    width-interleaving performed at hiding time and returns [N, 1, H, W//2].
    """

    def __init__(self, n_channels: int, n_classes: int, bilinear: bool = False):
        super(RevealNet, self).__init__()

        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

        # NOTE(review): this unshuffler is instantiated but never used in
        # forward(); kept so the attribute set (and any saved checkpoints)
        # stays unchanged — confirm before removing.
        self.unshuffler : nn.PixelUnshuffle = nn.PixelUnshuffle(2)
        self.inc = DoubleConv(n_channels, 64)

        # Encoder path.
        self.down_blocks = nn.ModuleList([
            Down(64, 128),
            Down(128, 256),
            Down(256, 512)
        ])

        # Decoder path (mirrors the encoder).
        self.up_blocks = nn.ModuleList([
            Up(512, 256, bilinear),
            Up(256, 128, bilinear),
            Up(128, 64, bilinear)
        ])

        self.outc = nn.Conv2d(64, n_classes, kernel_size=1)

    def forward(self, container_audio : torch.Tensor) -> torch.Tensor:
        """Run the U-Net, then de-interleave width back into two channels."""
        N, C, H, W = container_audio.shape

        x = self.inc(container_audio)

        # Keep each resolution's features for the skip connections.
        down_outputs = [x]
        for down in self.down_blocks:
            x = down(x)
            down_outputs.append(x)

        # Decoder: pair each up-block with the matching encoder feature.
        for i, up in enumerate(self.up_blocks):
            x = up(x, down_outputs[-2 - i])

        x = self.outc(x)

        # Invert the width-interleaving done by the hiding network.
        revealed = x.squeeze(1)  # [N, H, W]

        # BUGFIX: the split width was hard-coded to 256, silently assuming
        # W == 512.  Using W // 2 keeps the original behavior for W == 512
        # and makes any even input width work.
        revealed = revealed.view(N, H, W // 2, 2)  # [N, H, W/2, 2]

        # Inverse of the hiding-stage permute.
        revealed = revealed.permute(0, 3, 1, 2).contiguous()  # [N, 2, H, W/2]

        # Keep only the first of the two de-interleaved channels.
        revealed = revealed[:, 0:1, :, :]  # [N, 1, H, W/2]

        return revealed


class StegoUNet(nn.Module):
    """End-to-end audio steganography model.

    Embeds a secret spectrogram into a cover spectrogram via PrepHidingNet
    and a residual addition, optionally round-trips the container through
    the time domain (with or without added noise), then recovers the secret
    with RevealNet.
    """

    def __init__(self, transform : str = 'cosine', add_noise : bool = False,
                 noise_kind : list[str] | None = None,
                 noise_amplitude : list[float] | None = None):
        super().__init__()

        # Sub-networks
        self.PHN : PrepHidingNet = PrepHidingNet(n_channels=1, n_classes=1, transform=transform)  # hiding network
        self.RN : RevealNet = RevealNet(n_channels=1, n_classes=1)  # reveal network

        # Configuration.  NOTE: the `add_noise` parameter shadows the
        # module-level add_noise() helper inside __init__ only; forward()
        # still resolves the imported function.
        self.transform = transform
        self.add_noise = add_noise
        self.noise_kind = noise_kind
        self.noise_amplitude = noise_amplitude

        # BUGFIX: this used to be called at the top of __init__, before
        # self.PHN and self.RN existed, so self.modules() found no Conv2d or
        # BatchNorm2d layers and the initialization was a silent no-op.  It
        # must run after the sub-networks are built.  (Intended to guard
        # against NaNs early in training.)
        self._initialize_weights()

    def forward(self, secret : torch.Tensor, cover : torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (container, revealed) spectrogram tensors."""
        # Encode the secret into a residual signal.
        hidden_signal : torch.Tensor = self.PHN(secret)

        # Residual connection: embed the hidden signal into the cover.
        container : torch.Tensor = cover + hidden_signal

        if self.add_noise:
            # Convert the container to the time domain so noise is added on
            # the waveform.
            container_wav = isdct_torch(
                container.squeeze(0).squeeze(0),
                frame_length=4096,
                frame_step=62,
                window=torch.hamming_window
            )
            # Pick a random kind/amplitude from the configured lists.
            noise = add_noise(
                container_wav,
                self.noise_kind[np.random.randint(0, len(self.noise_kind))],
                self.noise_amplitude[np.random.randint(0, len(self.noise_amplitude))]
            ).type(torch.float32)
            # Transform the noisy waveform back to the spectral domain.
            spectral_noise = sdct_torch(
                noise,
                frame_length=4096,
                frame_step=62
            ).unsqueeze(0)

            # Inject the spectral noise into the container.
            container += spectral_noise
        else:
            # Without noise, still round-trip frequency -> time -> frequency
            # so the container matches what a decoder would receive.
            # NOTE(review): frame_length/frame_step differ between the two
            # branches (4096/62 vs 1024/130) — confirm this is intentional.
            container_wav = isdct_torch(
                container.squeeze(0).squeeze(0),
                frame_length=1024,
                frame_step=130,
                window=torch.hamming_window
            )
            container = sdct_torch(
                container_wav,
                frame_length=1024,
                frame_step=130
            ).unsqueeze(0)

        # Recover the secret from the container.
        container = container.unsqueeze(1)
        revealed = self.RN(container)

        return container, revealed

    def _initialize_weights(self):
        """Kaiming-init all conv weights; reset BatchNorm affine params."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)