import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import os

# Hyperparameters
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 128
latent_dim = 20
num_epochs = 20
learning_rate = 3e-4

# Data loading and preprocessing
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # scale pixel values to [-1, 1]
])

# download=True is a no-op when the files already exist under ./data,
# but prevents a RuntimeError on a fresh machine (download=False
# required the dataset to have been fetched beforehand).
train_dataset = torchvision.datasets.MNIST(
    root='data', train=True, transform=transform, download=True)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# CVAE model definition
class CVAE(nn.Module):
    """Conditional variational autoencoder for 28x28 MNIST digits.

    The class label (0-9) is embedded into a 10-dim vector and
    concatenated to both the encoder input and the latent code, so
    generation can be conditioned on a chosen digit.
    """

    def __init__(self, latent_dim):
        """Build encoder/decoder MLPs.

        Args:
            latent_dim: dimensionality of the latent code z.
        """
        super().__init__()  # py3 zero-argument form
        self.latent_dim = latent_dim
        self.label_emb = nn.Embedding(10, 10)  # 10 classes -> 10-dim embedding

        # Encoder: (flattened image + label embedding) -> [mu, log_var]
        self.encoder = nn.Sequential(
            nn.Linear(28*28 + 10, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 2*latent_dim)  # first half = mean, second half = log-variance
        )

        # Decoder: (latent code + label embedding) -> flattened image
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim + 10, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 28*28),
            nn.Tanh()  # outputs in [-1, 1], matching the Normalize((0.5,), (0.5,)) input range
        )

    def encode(self, x, c):
        """Map images `x` and integer labels `c` to posterior parameters (mu, log_var)."""
        c = self.label_emb(c)
        x = torch.cat([x.view(-1, 28*28), c], dim=1)
        h = self.encoder(x)
        mu, log_var = torch.chunk(h, 2, dim=1)
        return mu, log_var

    def reparameterize(self, mu, log_var):
        """Sample z ~ N(mu, sigma^2) with the reparameterization trick (differentiable)."""
        std = torch.exp(0.5*log_var)
        eps = torch.randn_like(std)
        return mu + eps*std

    def decode(self, z, c):
        """Decode latent codes `z` conditioned on labels `c` into (N, 1, 28, 28) images."""
        c = self.label_emb(c)
        z = torch.cat([z, c], dim=1)
        return self.decoder(z).view(-1, 1, 28, 28)

    def forward(self, x, c):
        """Full pass: encode, sample, decode.

        Returns:
            (x_recon, mu, log_var) — reconstruction plus posterior parameters
            needed by the KL term of the loss.
        """
        mu, log_var = self.encode(x, c)
        z = self.reparameterize(mu, log_var)
        x_recon = self.decode(z, c)
        return x_recon, mu, log_var

# Initialize model and optimizer at module level; train() and
# visualize_generation() below access these as globals.
model = CVAE(latent_dim).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training loop
def train():
    """Train the module-level CVAE and save a sample grid after every epoch.

    Uses the globals `model`, `optimizer`, `train_loader`, `device`,
    `num_epochs`. The loss is summed reconstruction MSE plus the
    Gaussian KL divergence, averaged over the batch.
    """
    for epoch in range(num_epochs):
        # Re-enter training mode every epoch: visualize_generation()
        # switches the model to eval mode at the end of each epoch, so a
        # single model.train() before the loop would leave epochs 2..N
        # running in eval mode.
        model.train()
        total_loss = 0
        for images, labels in train_loader:
            images = images.to(device)
            labels = labels.to(device)

            # Forward pass
            recon_images, mu, log_var = model(images, labels)

            # Loss: summed reconstruction error + analytic KL term,
            # normalized by batch size.
            recon_loss = F.mse_loss(recon_images, images, reduction='sum')
            kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
            loss = (recon_loss + kl_div) / images.size(0)

            # Backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {total_loss/len(train_loader):.4f}')
        visualize_generation(epoch+1)

# Sample-generation and saving helper
def visualize_generation(epoch):
    """Generate one image per digit class (0-9) and save the row as a PNG.

    Args:
        epoch: 1-based epoch number, used only in the output filename.
    """
    # Remember the caller's mode so it can be restored: the original
    # version left the model in eval mode permanently after the first
    # call, silently affecting all subsequent training epochs.
    was_training = model.training
    model.eval()
    try:
        with torch.no_grad():
            # Ensure the output directory exists (os is imported at module level)
            save_dir = "./generated_images"
            os.makedirs(save_dir, exist_ok=True)

            # One random latent per class, conditioned on labels 0..9
            z = torch.randn(10, latent_dim).to(device)
            c = torch.arange(0, 10).long().to(device)
            sample = model.decode(z, c).cpu()

            # Undo the Normalize((0.5,), (0.5,)) preprocessing: [-1, 1] -> [0, 1]
            sample = sample * 0.5 + 0.5

            # Draw the 10 digits in a single row
            fig, axes = plt.subplots(1, 10, figsize=(20, 2))
            for i in range(10):
                axes[i].imshow(sample[i].squeeze(), cmap='gray')
                axes[i].axis('off')

            # Save the figure and close it to avoid accumulating open figures
            plt.savefig(f"{save_dir}/epoch_{epoch:03d}.png", bbox_inches='tight', dpi=300)
            plt.close(fig)

            # Optionally also keep the raw tensor data
            #torch.save(sample, f"{save_dir}/epoch_{epoch:03d}.pt")
    finally:
        # Restore whichever mode the caller had set, even if plotting fails
        model.train(was_training)

# Entry point: run training when executed as a script
if __name__ == "__main__":
    train()
    
    # Optionally persist the trained weights:
    #torch.save(model.state_dict(), 'cvae_mnist.pth')
