import torchvision
import torch 
from torchvision import datasets, transforms
import torch.nn as nn 
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt 
import os 
from model import Discriminator, Generator

# Restrict this process to GPU index 1; must be set before CUDA is initialized.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

def generate_fake_images(g_model, latent_dim, n_samples, fake_label=0, device="cpu"):
    """Sample a batch of latent vectors and run the generator on them.

    Args:
        g_model: generator network mapping ``(n_samples, latent_dim)`` latents
            to images.
        latent_dim: size of each latent vector.
        n_samples: number of images to generate.
        fake_label: unused; kept only for backward compatibility with callers.
        device: device the latent batch (and hence the output) is created on.

    Returns:
        The generator's output for a batch of uniform-[0, 1) latent vectors.
    """
    # Sample the 2-D latent batch directly on the target device instead of
    # allocating a flat CPU tensor, reshaping it, and copying it over.
    # NOTE(review): uniform sampling; many GANs draw latents from torch.randn —
    # confirm this matches how the model was designed to be trained.
    latent_vec = torch.rand(n_samples, latent_dim, device=device)
    return g_model(latent_vec)

def loss_func(x, labels):
    """Binary cross-entropy between predictions ``x`` and targets ``labels``.

    Uses the functional form so a fresh ``nn.BCELoss`` module is not
    constructed on every call; the returned value is identical.
    """
    return nn.functional.binary_cross_entropy(x, labels)

if __name__ == "__main__":
    batch_size = 32
    latent_dim = 100

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5,), std=(0.5,))
    ])
    data_train = datasets.MNIST(root = "./data/",
                            transform=transform,
                            train = True,
                            download = True)

    dataloader = DataLoader(data_train, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device is :" + str(device))

    discriminator = Discriminator(device=device)
    generator = Generator(latent_dim=latent_dim, device=device)
    discriminator.to(device)
    generator.to(device)

    dis_optimizer = torch.optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    gene_optimizer = torch.optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))

    epoch = 0
    while True:
        epoch += 1
        report_loss = 0
        report_dis_loss = 0
        index = 0
        for true_image, _ in dataloader:
            batch_size = len(true_image)
            true_image = true_image.to(device)
            index += 1
            discriminator.train()
            generator.train()
            dis_optimizer.zero_grad()

            true_image_labels = torch.ones(batch_size).to(device)
            fake_image_labels = torch.zeros(batch_size).to(device)
            fake_image = generate_fake_images(generator, latent_dim, batch_size, device=device)
            fake_image = fake_image.detach().to(device)
            
            dis_result = discriminator(true_image).view(-1)
            dis_real_loss = loss_func(dis_result, true_image_labels)

            dis_result = discriminator(fake_image).view(-1)
            dis_fake_loss = loss_func(dis_result, fake_image_labels)
            
            dis_loss = dis_real_loss + dis_fake_loss
            dis_loss.backward()
            dis_optimizer.step()
            report_dis_loss += dis_loss.item()

            # generator optim
            gene_optimizer.zero_grad()
    
            n_samples = batch_size
            
            latent_vec = torch.rand(latent_dim * n_samples)
            latent_vec = latent_vec.view((n_samples, latent_dim))
            latent_vec = latent_vec.to(device)

            g_out = generator(latent_vec)
            z_out = discriminator(g_out).view(-1)
            g_loss = loss_func(z_out, true_image_labels)
            g_loss.backward()
            gene_optimizer.step()

            report_loss += g_loss.item()

            d_mean = z_out.mean()

            if index % 100 == 0 :
                # 查看学习率
                # print(dis_optimizer.param_groups[0]['lr'])
                print("epoch is " + str(epoch) + " index is " + str(index) + " gloss is " + str(g_loss.item()) + " " + "dloss is " + str(dis_loss.item()) + " d_mean is " + str(d_mean.item()))
                report_dis_loss = 0
                report_loss = 0

        torch.save(generator.state_dict(), "./gan_model_little.pkl")
        if epoch == 50:
            break
        ## 学习率缩减
        # ExpLR.step()
        print("model has been saved!")
                




