import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np

#
# 2 epochs takes roughly 1 minute on an ordinary desktop machine
#

# Generator network definition
class Generator(nn.Module):
    """MLP generator: maps a latent noise vector to an image in [-1, 1].

    Args:
        latent_dim: size of the input noise vector z.
        img_shape: output image shape, e.g. (1, 28, 28) for MNIST.
    """

    def __init__(self, latent_dim, img_shape):
        super(Generator, self).__init__()
        self.img_shape = img_shape

        def block(in_feat, out_feat, normalize=True):
            # Linear -> (optional BatchNorm1d) -> LeakyReLU building block.
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                # BUG FIX: BatchNorm1d's second positional argument is `eps`,
                # so the original `nn.BatchNorm1d(out_feat, 0.8)` silently set
                # eps=0.8 (default 1e-5). The 0.8 was intended as the momentum.
                layers.append(nn.BatchNorm1d(out_feat, momentum=0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(latent_dim, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh()  # squashes output to [-1, 1], matching normalized data
        )

    def forward(self, z):
        """Generate a batch of images from latent vectors z of shape (N, latent_dim)."""
        img = self.model(z)
        # Reshape the flat MLP output to (N, *img_shape).
        img = img.view(img.size(0), *self.img_shape)
        return img

# Discriminator network definition
class Discriminator(nn.Module):
    """MLP discriminator: scores a flattened image as real (→1) or fake (→0).

    Args:
        img_shape: input image shape, e.g. (1, 28, 28) for MNIST.
    """

    def __init__(self, img_shape):
        super(Discriminator, self).__init__()

        in_features = int(np.prod(img_shape))
        layers = [
            nn.Linear(in_features, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),  # probability in (0, 1); pairs with BCELoss
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, img):
        """Return a per-sample realness probability of shape (N, 1)."""
        flat = img.view(img.size(0), -1)
        return self.model(flat)

# Training hyperparameters
latent_dim = 100  # size of the generator's input noise vector
img_shape = (1, 28, 28)  # MNIST image size: 1 channel, 28x28 pixels
batch_size = 64
epochs = 2
lr = 0.0002  # learning rate shared by both Adam optimizers
b1 = 0.5  # Adam beta1
b2 = 0.999  # Adam beta2

# Initialize the networks
# 1. the generator network
generator = Generator(latent_dim, img_shape)
# 2. the discriminator network
discriminator = Discriminator(img_shape)

# Loss function and optimizers
# BCELoss matches the discriminator's Sigmoid output
adversarial_loss = nn.BCELoss()
optimizer_G = optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))

# Data loading: Normalize([0.5], [0.5]) maps pixels from [0, 1] to [-1, 1],
# matching the generator's Tanh output range
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
])

dataset = torchvision.datasets.MNIST(
    root='./data', train=True, download=True, transform=transform
)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Checkpointing setup: checkpoints are written to / restored from model_path
model_path = './gan_models'
import os
import time
from datetime import datetime

# Create the checkpoint directory (no-op if it already exists)
os.makedirs(model_path, exist_ok=True)

# Resume from an earlier run if checkpoints are present.
try:
    # Checkpoints are dicts; extract the model state from them.
    generator_checkpoint = torch.load(os.path.join(model_path, 'generator.pth'))
    generator.load_state_dict(generator_checkpoint['model_state_dict'])

    discriminator_checkpoint = torch.load(os.path.join(model_path, 'discriminator.pth'))
    discriminator.load_state_dict(discriminator_checkpoint['model_state_dict'])

    # FIX: also restore the optimizer state that the save step records
    # (Adam moments), so resumed training continues where it left off.
    if 'optimizer_state_dict' in generator_checkpoint:
        optimizer_G.load_state_dict(generator_checkpoint['optimizer_state_dict'])
    if 'optimizer_state_dict' in discriminator_checkpoint:
        optimizer_D.load_state_dict(discriminator_checkpoint['optimizer_state_dict'])

    print("加载已有模型成功，继续训练...")
except FileNotFoundError:
    # FIX: the original placed a bare `except:` after `except Exception`,
    # making it effectively unreachable; handle the missing-file case first.
    print("未找到已有模型，从头开始训练...")
except Exception as e:
    print(f"加载模型失败: {str(e)}")
    print("未找到已有模型或模型损坏，从头开始训练...")

# Record the training start time
start_time = time.time()
print(f"训练开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

for epoch in range(epochs):
    for i, (imgs, _) in enumerate(dataloader):

        # Target labels: 1 for real images, 0 for generated (fake) ones
        valid = torch.ones(imgs.size(0), 1)
        fake = torch.zeros(imgs.size(0), 1)

        # ---------------------
        #  Train the discriminator
        # ---------------------
        optimizer_D.zero_grad()

        # Loss on real images (discriminator should output 1)
        real_loss = adversarial_loss(discriminator(imgs), valid)

        # Loss on generated images (discriminator should output 0);
        # detach() keeps this backward pass from updating the generator
        z = torch.randn(imgs.size(0), latent_dim)
        gen_imgs = generator(z)
        fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)

        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()

        # -----------------
        #  Train the generator
        # -----------------
        optimizer_G.zero_grad()

        # The generator tries to make the (just-updated) discriminator
        # label fresh samples as real, hence the `valid` targets
        z = torch.randn(imgs.size(0), latent_dim)
        gen_imgs = generator(z)
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
        g_loss.backward()
        optimizer_G.step()

        # Log progress every 100 batches
        if i % 100 == 0:
            print(f"[Epoch {epoch}/{epochs}] [Batch {i}/{len(dataloader)}] "
                  f"[D loss: {d_loss.item():.4f}] [G loss: {g_loss.item():.4f}]")

# Save checkpoints (model + optimizer state so training can be resumed).
# Alternative, simpler format: save only the weights, e.g.
#   torch.save(generator.state_dict(), os.path.join(model_path, 'generator.pth'))
# FIX: the saves were wrapped in `with torch.no_grad():` — saving state dicts
# performs no autograd work, so the misleading context manager was removed.
torch.save({
    'model_state_dict': discriminator.state_dict(),
    'optimizer_state_dict': optimizer_D.state_dict(),
    'epoch': epochs
}, os.path.join(model_path, 'discriminator.pth'))

torch.save({
    'model_state_dict': generator.state_dict(),
    'optimizer_state_dict': optimizer_G.state_dict(),
    'epoch': epochs
}, os.path.join(model_path, 'generator.pth'))

# Report wall-clock training time
end_time = time.time()
training_time = end_time - start_time
print(f"训练结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"总训练时间: {training_time//3600:.0f}小时 {(training_time%3600)//60:.0f}分钟 {training_time%60:.2f}秒")

# Switch the generator to eval mode before sampling
# (affects the BatchNorm layers inside it).
generator.eval()

# Draw one latent vector and display the corresponding generated image.
with torch.no_grad():  # no gradients needed for inference
    z = torch.randn(1, latent_dim)
    sample = generator(z).detach().numpy()
    plt.imshow(sample[0, 0], cmap='gray')
    plt.show()

# To continue training afterwards, switch back with:
# generator.train()
