import torch
import torch.optim as optim
from matplotlib import pyplot as plt
from torch import nn

from data_manager import DataManager
from models_manger import Discriminator, Generator

torch.manual_seed(0)
"""
控制随机初始化
    影响 权重初始化（如 nn.Linear、nn.Conv2d 的初始参数）。
    影响 数据打乱（如 DataLoader(shuffle=True)）。
    影响 Dropout、随机增强（如 transforms.RandomCrop）等涉及随机性的操作。"""


def test():
    """Train a simple GAN on MNIST and visualize generated samples after each epoch.

    A Generator maps 100-dim Gaussian noise to 28x28 images; a Discriminator
    scores images as real/fake. Both are trained with Adam and BCE adversarial
    loss for 40 epochs, printing per-epoch mean losses and showing up to 12
    generated digits from the last batch of every epoch.

    No parameters; no return value. Side effects: training, stdout printing,
    and blocking matplotlib windows (plt.show()).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    adversarial_loss = nn.BCELoss().to(device)

    generator = Generator(100, 28).to(device)
    discriminator = Discriminator(28).to(device)

    data_manager = DataManager("mnist", 64, {"normalize": [0.5, 0.5]})

    # betas=(0.5, 0.999): the commonly used GAN setting — a lower beta1
    # stabilizes adversarial training compared to Adam's default 0.9.
    optimizer_g = optim.Adam(generator.parameters(), lr=0.001, betas=(0.5, 0.999))
    optimizer_d = optim.Adam(discriminator.parameters(), lr=0.001, betas=(0.5, 0.999))

    train_loader, test_loader = data_manager.get_data_loader()
    epochs = 40

    for epoch in range(epochs):
        # Per-epoch accumulators of the per-batch mean losses.
        generator_loss = 0.0
        discriminator_loss = 0.0
        for batch_idx, (img, _) in enumerate(train_loader):

            # Target labels for BCE: real -> 1, fake -> 0.
            valid = torch.ones((img.size(0), 1), device=device, dtype=torch.float, requires_grad=False)
            fake = torch.zeros((img.size(0), 1), device=device, dtype=torch.float, requires_grad=False)

            real_images = img.to(device, dtype=torch.float)

            # ---- Generator step: make the discriminator label fakes as real.
            optimizer_g.zero_grad()

            # Random noise input (standard normal), one 100-dim vector per image.
            z = torch.randn((img.size(0), 100), device=device, dtype=torch.float)

            gen_images = generator(z)

            gen_loss = adversarial_loss(discriminator(gen_images), valid)
            generator_loss += gen_loss.item()

            gen_loss.backward()
            optimizer_g.step()

            # ---- Discriminator step: separate real images from (detached) fakes.
            optimizer_d.zero_grad()

            real_loss = adversarial_loss(discriminator(real_images), valid)
            # detach() stops discriminator gradients from flowing into the generator.
            fake_loss = adversarial_loss(discriminator(gen_images.detach()), fake)

            d_loss = (real_loss + fake_loss) / 2
            discriminator_loss += d_loss.item()
            d_loss.backward()
            optimizer_d.step()

        # BUGFIX: the accumulators hold per-batch *means*, so average over the
        # number of batches (len(train_loader)), not the number of samples
        # (len(train_loader.dataset)) — the old denominator understated the
        # losses by roughly the batch size.
        print(f"epoch ：{epoch}，  generator_loss：{generator_loss/len(train_loader)}，discriminator_loss:{discriminator_loss/len(train_loader)}")

        # Convert the last generated batch to a numpy array for plotting.
        images = gen_images.view(-1, 28, 28).detach().cpu().numpy()
        # BUGFIX: the final batch may hold fewer than 12 images (dataset size is
        # rarely an exact multiple of the batch size) — clamp the panel count to
        # avoid an IndexError when indexing images[i].
        n_show = min(12, images.shape[0])
        fig, axs = plt.subplots(1, n_show, figsize=(10, 10))
        for i in range(n_show):
            axs[i].imshow(images[i], cmap="gray")
            axs[i].axis("off")
        plt.show()









# Script entry point: run the GAN training demo when executed directly.
if __name__ == '__main__':
    test()
