import torch
import torch.nn as nn
import torch.optim as optim
import torchaudio
import matplotlib.pyplot as plt


# Generator: latent noise -> mel-spectrogram frame
class Generator(nn.Module):
    """Map a latent noise vector of size ``z_dim`` to an 80-bin mel frame.

    The output passes through ``Tanh`` so every bin lies in [-1, 1].
    """

    def __init__(self, z_dim=100):
        super(Generator, self).__init__()
        # Widths of the fully connected trunk; each pair becomes Linear+ReLU.
        widths = [z_dim, 128, 256, 512, 1024]
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.ReLU())
        # Head: project to 80 mel bins, squash to [-1, 1].
        layers.append(nn.Linear(widths[-1], 80))
        layers.append(nn.Tanh())
        self.fc = nn.Sequential(*layers)

    def forward(self, z):
        """Return a (batch, 80) tensor of generated mel values in [-1, 1]."""
        return self.fc(z)


# Discriminator: mel-spectrogram frame -> real/fake probability
class Discriminator(nn.Module):
    """Classify an 80-bin mel frame as real (1) or generated (0).

    Ends in ``Sigmoid`` so the output is a probability in [0, 1].
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        blocks = []
        # Shrinking fully connected trunk with LeakyReLU activations.
        for n_in, n_out in ((80, 512), (512, 256)):
            blocks.append(nn.Linear(n_in, n_out))
            blocks.append(nn.LeakyReLU(0.2))
        # Head: single logit squashed to a probability.
        blocks.append(nn.Linear(256, 1))
        blocks.append(nn.Sigmoid())
        self.fc = nn.Sequential(*blocks)

    def forward(self, x):
        """Return a (batch, 1) tensor of real-probabilities in [0, 1]."""
        return self.fc(x)


# Instantiate the generator and discriminator.
z_dim = 100  # dimensionality of the latent noise vector fed to the generator
generator = Generator(z_dim)
discriminator = Discriminator()

# Optimizers — Adam with beta1=0.5, the usual choice for GAN training.
lr = 0.0002
g_optimizer = optim.Adam(generator.parameters(), lr=lr, betas=(0.5, 0.999))
d_optimizer = optim.Adam(discriminator.parameters(), lr=lr, betas=(0.5, 0.999))

# Loss function: binary cross-entropy over the discriminator's real/fake output.
criterion = nn.BCELoss()


# Load data (placeholder — a real pipeline would extract mel spectrograms from audio files).
def load_example_mel_spectrogram():
    """Return one synthetic "real" mel-spectrogram sample of shape (1, 80).

    The values are scaled to [-1, 1] to match the range of the generator's
    Tanh output; with the raw ``torch.rand`` range of [0, 1] the
    discriminator could separate real from fake by value range alone.

    Returns:
        torch.Tensor: shape (1, 80), values uniform in [-1, 1].
    """
    mel = torch.rand(80) * 2.0 - 1.0  # uniform on [-1, 1]
    return mel.unsqueeze(0)  # add batch dimension -> (1, 80)


# Train the GAN: each epoch does one discriminator step and one generator step
# on a single (batch of 1) sample. Statement order matters: the fake batch is
# detached for the D step so generator gradients are not touched.
num_epochs = 1000
for epoch in range(num_epochs):
    # Real data (placeholder sample of shape (1, 80)).
    real_data = load_example_mel_spectrogram()
    real_labels = torch.ones(real_data.size(0), 1)  # label 1 = real

    # Fake data: sample latent noise and run it through the generator.
    z = torch.randn(real_data.size(0), z_dim)  # random noise
    fake_data = generator(z)
    fake_labels = torch.zeros(real_data.size(0), 1)  # label 0 = fake

    # Discriminator step: push D(real) toward 1 and D(fake) toward 0.
    # fake_data is detached so this backward pass cannot reach the generator.
    discriminator.zero_grad()
    real_loss = criterion(discriminator(real_data), real_labels)
    fake_loss = criterion(discriminator(fake_data.detach()), fake_labels)
    d_loss = (real_loss + fake_loss) / 2
    d_loss.backward()
    d_optimizer.step()

    # Generator step: the generator wants the discriminator to output "real"
    # for its fakes, so it is trained against real_labels.
    generator.zero_grad()
    g_loss = criterion(discriminator(fake_data), real_labels)
    g_loss.backward()
    g_optimizer.step()

    if epoch % 100 == 0:
        print(f"Epoch [{epoch}/{num_epochs}], D Loss: {d_loss.item()}, G Loss: {g_loss.item()}")

    # Visualize the generated mel spectrogram (only on the final epoch).
    # NOTE(review): fake_data is (1, 80) here, so imshow renders a single row.
    if epoch == num_epochs - 1:
        plt.figure(figsize=(10, 4))
        plt.imshow(fake_data.detach().numpy(), aspect='auto', origin='lower')
        plt.title(f"Generated Mel Spectrogram - Epoch {epoch}")
        plt.colorbar()
        plt.show()

# Test phase: use the trained generator to synthesize a new mel spectrogram.
z_test = torch.randn(1, z_dim)  # fresh latent noise vector
with torch.no_grad():  # inference only — skip autograd bookkeeping
    generated_mel_spectrogram = generator(z_test)

# Visualize the generated mel spectrogram (shape (1, 80) -> single-row image).
plt.figure(figsize=(10, 4))
plt.imshow(generated_mel_spectrogram.numpy(), aspect='auto', origin='lower')
plt.title("Generated Mel Spectrogram from Test Data")
plt.colorbar()
plt.show()
