import torch
from torch import nn
from tqdm.auto import tqdm  # 显示进度条   pip后报错
from torchvision import transforms
from torchvision.datasets import MNIST  # 训练数据集 469个图像
from torchvision.utils import make_grid  # 将图像按网格排序
from torch.utils.data import DataLoader  # 加载数据集
import matplotlib.pyplot as plt

from models import gan


def show_tensor_images(image_tensor, num_images=25, size=(1, 28, 28)):
    """Render a batch of images as a 5-per-row grid with matplotlib.

    Args:
        image_tensor: batch of (possibly flattened) images; reshaped to (B, *size).
        num_images: how many images from the batch to display.
        size: (C, H, W) of a single image.
    """
    batch = image_tensor.detach().cpu().view(-1, *size)  # -> (B, C, H, W)
    grid = make_grid(batch[:num_images], nrow=5)  # nrow: images per grid row
    # matplotlib wants channels last; squeeze drops a singleton channel (grayscale)
    plt.imshow(grid.permute(1, 2, 0).squeeze())
    plt.show()


# Leftover manual smoke test for show_tensor_images — kept commented out.
a = torch.ones(7, 1, 2, 2)  # (B, C, H, W); try torch.randn for a noisier check
# show_tensor_images(a, 7, (1, 2, 2))


def get_noise(n_samples, z_dim, device='cpu'):
    """Sample a batch of latent vectors from a standard normal.

    Args:
        n_samples: batch size (number of noise vectors).
        z_dim: latent dimensionality.
        device: torch device on which to allocate the tensor.

    Returns:
        Tensor of shape (n_samples, z_dim).
    """
    noise = torch.randn(n_samples, z_dim, device=device)
    return noise


def get_disc_loss(gen, disc, criterion, real, num_images, z_dim, device):
    """Compute the discriminator loss for one batch.

    Fake images are scored against a target of 0 and real images against 1;
    the two BCE terms are averaged.

    Args:
        gen: generator mapping (num_images, z_dim) noise to flat images.
        disc: discriminator producing one logit per image.
        criterion: loss function, e.g. nn.BCEWithLogitsLoss.
        real: batch of real (flattened) images.
        num_images: number of fake images to generate.
        z_dim: latent dimensionality.
        device: device for the sampled noise.

    Returns:
        Scalar discriminator loss.
    """
    noise = get_noise(num_images, z_dim, device=device)
    fake = gen(noise)  # (num_images, im_dim) fakes

    # detach() stops gradients from reaching the generator during this update
    pred_on_fake = disc(fake.detach())
    loss_on_fake = criterion(pred_on_fake, torch.zeros_like(pred_on_fake))

    pred_on_real = disc(real)
    loss_on_real = criterion(pred_on_real, torch.ones_like(pred_on_real))

    return (loss_on_fake + loss_on_real) / 2


def get_gen_loss(gen, disc, criterion, num_images, z_dim, device):
    """Compute the generator loss for one batch of freshly generated fakes.

    The generator wants the discriminator to score its fakes as real,
    so the BCE target is all ones.

    Args:
        gen: generator mapping (num_images, z_dim) noise to flat images.
        disc: discriminator producing one logit per image.
        criterion: loss function, e.g. nn.BCEWithLogitsLoss.
        num_images: number of fake images to generate.
        z_dim: latent dimensionality.
        device: device for the sampled noise.

    Returns:
        Scalar generator loss.
    """
    noise = get_noise(num_images, z_dim, device=device)
    pred_on_fake = disc(gen(noise))  # no detach: gradients must reach gen
    return criterion(pred_on_fake, torch.ones_like(pred_on_fake))


# --- Hyperparameters ---
criterion = nn.BCEWithLogitsLoss()  # sigmoid + BCELoss fused (numerically stable)
n_epochs = 200
z_dim = 64
display_step = 500  # report losses / show samples every this many steps
batch_size = 128
lr = 0.0001
device = 'cpu'

# MNIST training set, images as [0, 1] float tensors.
dataloader = DataLoader(
    MNIST('E:/datasets', download=False, transform=transforms.ToTensor()),
    batch_size=batch_size,
    shuffle=True
)

# 784 = 28 * 28: images are flattened for the MLP networks.
gen = gan.MLP_Generator(input_dim=z_dim, output_dim=784).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)

disc = gan.MLP_Discriminator(input_dim=784).to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)

cur_step = 0  # global step counter across epochs
mean_generator_loss = 0
mean_discriminator_loss = 0
for epoch in range(n_epochs):
    for real, _ in tqdm(dataloader):
        cur_batch_size = len(real)  # last batch may be smaller than batch_size

        # Flatten images to (batch, 784) to match the MLP input.
        real = real.view(cur_batch_size, -1).to(device)

        # --- Discriminator update ---
        disc_opt.zero_grad()
        disc_loss = get_disc_loss(gen, disc, criterion, real, cur_batch_size, z_dim, device)
        # retain_graph is not needed here: the fakes are detached inside
        # get_disc_loss, and the generator step builds a fresh graph from
        # new noise, so nothing reuses this graph after backward.
        disc_loss.backward()
        disc_opt.step()

        # --- Generator update ---
        gen_opt.zero_grad()
        gen_loss = get_gen_loss(gen, disc, criterion, cur_batch_size, z_dim, device)
        gen_loss.backward()
        gen_opt.step()

        # Running means over the current display window.
        mean_discriminator_loss += disc_loss.item() / display_step
        mean_generator_loss += gen_loss.item() / display_step

        if cur_step % display_step == 0 and cur_step > 0:
            print(
                f"Step {cur_step}: Generator loss: {mean_generator_loss},discriminator loss: {mean_discriminator_loss}")
            fake_noise = get_noise(cur_batch_size, z_dim, device=device)
            fake = gen(fake_noise)
            show_tensor_images(fake)
            show_tensor_images(real)
            mean_generator_loss = 0
            mean_discriminator_loss = 0
        cur_step += 1
