import torch
import torch.nn as nn
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import matplotlib.pyplot as plt
import numpy as np
import configparser

# Device selection: use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hyperparameters, loaded from config.ini. Section/option names (some in
# Chinese) must match the config file exactly — they are lookup keys, not text.
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
z_dim = config.getint('BEGAN', 'z_dim')    # latent-vector dimensionality
k = config.getfloat('BEGAN', 'k')          # BEGAN balance term (updated during training)
gamma = config.getfloat('BEGAN', 'gamma')  # BEGAN diversity ratio

img_channels = config.getint('image', '图片通道')       # image channel count
img_size = config.getint('image', '图片分辨率')         # image side length (square)
batch_size = config.getint('config', '批处理样本数量')  # mini-batch size
learning_rate = config.getfloat('config', '学习率')     # optimizer learning rate
# Epoch count is an integer quantity: parse as float (so values like "100.0"
# still work) then truncate to int, instead of keeping it a float.
num_epochs = int(config.getfloat('config', '学习次数'))
# Remember the learning rate currently baked into the optimizers, so the
# training loop can detect config changes and only rebuild the optimizers
# when the rate actually changed.
learning_rate_old = learning_rate
# Load the MNIST training set (downloaded to ./data on first run).
train_dataset = MNIST(root="./data", train=True, download=True, transform=ToTensor())

# Data loader: shuffled mini-batches of size batch_size.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)


# 定义 BEGAN 模型
class Generator(nn.Module):
    """MLP generator: maps a latent vector to an image batch.

    Layer widths grow z_dim -> 128 -> 256 -> 512 -> C*H*W, with ReLU between
    hidden layers and Tanh on the output (values in [-1, 1]). Relies on the
    module-level ``z_dim``, ``img_channels`` and ``img_size`` read from
    config.ini.
    """

    def __init__(self):
        super(Generator, self).__init__()

        widths = [z_dim, 128, 256, 512]
        layers = []
        for n_in, n_out in zip(widths, widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(widths[-1], img_channels * img_size * img_size))
        layers.append(nn.Tanh())
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        """Generate images from latent codes ``z`` of shape (N, z_dim).

        Returns a tensor of shape (N, img_channels, img_size, img_size).
        """
        flat = self.model(z)
        return flat.view(flat.size(0), img_channels, img_size, img_size)


class Discriminator(nn.Module):
    """MLP discriminator: flattens an image batch and scores each sample.

    Widths shrink C*H*W -> 512 -> 256 -> 128 -> 1 with LeakyReLU(0.2) between
    layers and a final Sigmoid, so outputs lie in (0, 1). Relies on the
    module-level ``img_channels`` and ``img_size`` read from config.ini.
    """

    def __init__(self):
        super(Discriminator, self).__init__()

        in_features = img_channels * img_size * img_size
        self.model = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 128),
            nn.LeakyReLU(0.2),
            nn.Linear(128, 1),
            nn.Sigmoid(),
        )

    def forward(self, img):
        """Score an image batch; returns a (N, 1) tensor of validities."""
        flat = img.view(img.size(0), -1)
        return self.model(flat)


def gen_img_plot(model, test_input, num_images=16):
    """Display a grid of images generated by ``model`` from ``test_input``.

    Args:
        model: generator network; ``model(test_input)`` is expected to emit
            Tanh outputs in [-1, 1] (see Generator).
        test_input: latent batch with at least ``num_images`` samples.
        num_images: how many images to show (default 16, preserving the
            original hard-coded 4x4 grid); laid out on the smallest square
            grid that fits.
    """
    prediction = np.squeeze(model(test_input).detach().cpu().numpy())
    side = int(np.ceil(np.sqrt(num_images)))  # smallest square grid that fits
    plt.figure(figsize=(4, 4))
    for i in range(num_images):
        plt.subplot(side, side, i + 1)
        # Map the Tanh range [-1, 1] to [0, 1] for imshow.
        plt.imshow((prediction[i] + 1) / 2)
        plt.axis('off')
    plt.show()


# Resume from checkpoints instead of fresh models (uncomment the two lines
# below to train from scratch instead).
#generator = Generator().to(device)
#discriminator = Discriminator().to(device)
# NOTE(review): torch.load on a full pickled model executes arbitrary code
# from the checkpoint file — only load checkpoints you trust.
# map_location=device makes loading work even when the checkpoint was saved
# from a CUDA model and this machine is CPU-only (and vice versa).
generator = torch.load(r'C:\Users\office\PycharmProjects\GAN\BEGAN\BEGAN_model\gen\10600.pth', map_location=device)
discriminator = torch.load(r'C:\Users\office\PycharmProjects\GAN\BEGAN\BEGAN_model\dis\10600.pth', map_location=device)
# Loss and optimizers. MSE on the sigmoid output is an LSGAN-style objective.
criterion = nn.MSELoss()
g_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate)
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate)

# Training loop. Runs forever (while 1) and re-reads config.ini constantly so
# k / gamma / learning rate / output mode can be tuned live while training.
total_step = len(train_loader)
config = configparser.ConfigParser()
epoch = 0
while 1:
    epoch += 1  # epoch is 1-based from here on
    # Re-read tunable hyperparameters at the start of every epoch.
    config.read('config.ini', encoding='utf-8')
    k = config.getfloat('BEGAN', 'k')
    gamma = config.getfloat('BEGAN', 'gamma')
    learning_rate = config.getfloat('config', '学习率')
    print('-----------------------------------')
    print('k是：' + str(k))
    print('gamma是：' + str(gamma))
    print('学习率是：' + str(learning_rate))
    # Rebuild the optimizers only when the learning rate actually changed,
    # to avoid discarding Adam state every epoch.
    if learning_rate_old != learning_rate:
        print('优化器已更新')
        g_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate)
        d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate)

    for i, (images, _) in enumerate(train_loader):
        # The output mode ('输出模式') is re-read every step for live control.
        config.read('config.ini', encoding='utf-8')
        out = config.get('config', '输出模式')
        batch_size = images.size(0)  # last batch may be smaller
        images = images.to(device)

        # Real/fake targets for the MSE (LSGAN-style) objective.
        real_labels = torch.ones(batch_size, 1).to(device)
        fake_labels = torch.zeros(batch_size, 1).to(device)

        # ---- Train the discriminator ----
        d_optimizer.zero_grad()

        real_outputs = discriminator(images)
        z = torch.randn(batch_size, z_dim).to(device)
        fake_images = generator(z)
        # detach() so no gradients flow into the generator during D's update.
        fake_outputs = discriminator(fake_images.detach())
        d_loss_real = criterion(real_outputs, real_labels)
        d_loss_fake = criterion(fake_outputs, fake_labels)
        d_loss = d_loss_real + d_loss_fake

        d_loss.backward()
        d_optimizer.step()

        # ---- Train the generator ----
        g_optimizer.zero_grad()

        fake_outputs = discriminator(fake_images)
        g_loss = criterion(fake_outputs, real_labels)

        g_loss.backward()
        g_optimizer.step()

        # Update k (BEGAN balance term, here only logged/saved, not used in
        # the losses). Take .item() on BOTH losses so k stays a plain Python
        # float instead of silently holding a tensor reference.
        k_next = k + 0.001 * (gamma * d_loss_real.item() - g_loss.item())
        k = min(max(k_next, 0), 1)

        # WGAN-style weight clipping on the discriminator parameters.
        for group in d_optimizer.param_groups:
            for p in group['params']:
                p.data.clamp_(-0.01, 0.01)

        # Progress log every other step. epoch is already 1-based (it is
        # incremented at the top of the loop), so print it as-is — the old
        # "epoch + 1" double-counted.
        if (i + 1) % 2 == 0:
            print('Epoch [{}/{}], Step [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}, k: {:.6f}'
              .format(epoch, num_epochs, i + 1, total_step, d_loss.item(), g_loss.item(), k))
        if (out == '1'):
            z = torch.randn(batch_size, z_dim).to(device)
            #gen_img_plot(generator, z)
        elif (out == '2'):
            # Output mode 2: checkpoint the full models on every step.
            torch.save(generator, r'C:\Users\office\PycharmProjects\GAN\BEGAN_model\gen/gen_epoch_{}-gamma_{}-k_{}.pth'.format(epoch, gamma, k))
            torch.save(discriminator, r'C:\Users\office\PycharmProjects\GAN\BEGAN_model\dis/dis_epoch_{}-gamma_{}-k_{}.pth'.format(epoch, gamma, k))
            print('已保存！')
        elif (epoch % 100 == 0 or epoch == num_epochs):
            # Periodic checkpoint (epoch starts at 1, so no "epoch != 0"
            # guard is needed). Filenames now use the true 1-based epoch.
            torch.save(generator, r'C:\Users\office\PycharmProjects\GAN\BEGAN_model\gen/gen_epoch_{}-gamma_{}-k_{}.pth'.format(epoch, gamma, k))
            torch.save(discriminator, r'C:\Users\office\PycharmProjects\GAN\BEGAN_model\dis/dis_epoch_{}-gamma_{}-k_{}.pth'.format(epoch, gamma, k))
            print('已保存！')
        if epoch % 100 == 0 or out == '1':
            # Save a 4x4 grid of fresh samples for visual inspection.
            z = torch.randn(16, z_dim).to(device)
            fake_images = generator(z)
            save_image(fake_images, './img/epoch_{}-step_{}-gamma_{}-k_{}.png'.format(epoch, i + 1, gamma, k), nrow=4, normalize=True)
            print("测试图片已输出")
    # Remember the rate we just trained with so the next epoch can detect a
    # config change.
    learning_rate_old = learning_rate


