#models/discriminator.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Discriminator(nn.Module):
    """Binary real/fake classifier over flat feature vectors.

    Architecture: input_dim -> 128 -> 32 -> 1, with batch normalization,
    LeakyReLU activations and dropout; the final sigmoid keeps outputs
    in (0, 1). The module is moved to CUDA when available, CPU otherwise.
    """

    def __init__(self, input_dim):
        super().__init__()
        layers = [
            nn.Linear(input_dim, 128),
            nn.BatchNorm1d(128),  # batch norm stabilizes GAN training
            nn.LeakyReLU(0.2),
            nn.Dropout(0.3),
            nn.Linear(128, 32),
            nn.BatchNorm1d(32),
            nn.LeakyReLU(0.2),
            nn.Linear(32, 1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)
        # Place all parameters on the GPU when one is present.
        self.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))

    def forward(self, x):
        # Promote a single 1-D sample to a batch of one.
        if x.dim() == 1:
            x = x.unsqueeze(0)
        return self.model(x)

# Module-level default device. Note: update_discriminator below derives its
# device from the discriminator's own parameters, so this global appears to
# exist for external importers of this module — TODO confirm it is used.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def update_discriminator(real, fake, opt_d, discriminator, label_smoothing=0.9):
    """Run one optimization step of the discriminator.

    Computes the standard GAN discriminator loss (binary cross-entropy
    against smoothed real labels and hard fake labels), backpropagates,
    clips the global gradient norm, and steps the optimizer.

    Args:
        real: Batch of real samples, shape (batch, features).
        fake: Batch of generator outputs, shape (batch, features).
        opt_d: Optimizer over ``discriminator.parameters()``.
        discriminator: Module whose outputs lie in (0, 1) (ends in a sigmoid).
        label_smoothing: Target used for real samples instead of 1.0;
            smoothing regularizes the discriminator.

    Returns:
        dict of Python floats: ``total_loss``, ``total_grad`` (sum of
        post-clip per-parameter L2 gradient norms), ``real_loss``,
        ``fake_loss``.

    Raises:
        ValueError: If ``real`` and ``fake`` differ in feature dimension.
    """
    # Move inputs to wherever the discriminator's parameters live.
    device = next(discriminator.parameters()).device
    # Validate with an exception, not `assert` (asserts vanish under -O).
    if real.shape[1] != fake.shape[1]:
        raise ValueError(
            f"feature-dimension mismatch: real has {real.shape[1]}, "
            f"fake has {fake.shape[1]}"
        )
    real = real.to(device)
    fake = fake.to(device)
    opt_d.zero_grad()

    real_pred = discriminator(real)
    # Detach so no gradient flows back into the generator this step.
    fake_pred = discriminator(fake.detach())

    # Label smoothing: real targets are `label_smoothing` rather than 1.0.
    # full_like/zeros_like already inherit the prediction's device and dtype,
    # so no extra .to(device) is needed.
    real_labels = torch.full_like(real_pred, label_smoothing)
    fake_labels = torch.zeros_like(fake_pred)

    # Clamp predictions away from {0, 1} so the log() inside BCE stays finite.
    eps = 1e-7
    real_loss = F.binary_cross_entropy(real_pred.clamp(eps, 1 - eps), real_labels)
    fake_loss = F.binary_cross_entropy(fake_pred.clamp(eps, 1 - eps), fake_labels)

    # TODO: consider adding a feature-matching loss term here.
    d_loss = real_loss + fake_loss

    d_loss.backward()
    # Clip the global gradient norm to keep discriminator updates small.
    torch.nn.utils.clip_grad_norm_(discriminator.parameters(), 0.05)
    # Sum post-clip per-parameter norms; skip parameters with no gradient
    # (the original crashed on p.grad being None for unused parameters),
    # and avoid the deprecated .data accessor.
    total_grad = sum(
        p.grad.detach().norm(2).item()
        for p in discriminator.parameters()
        if p.grad is not None
    )

    opt_d.step()

    return {
        "total_loss": d_loss.item(),
        "total_grad": total_grad,
        "real_loss": real_loss.item(),
        "fake_loss": fake_loss.item(),
    }