import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import time
import os

# NOTE: one epoch takes roughly 2 minutes 30 seconds.

class SimpleUNet(nn.Module):
    """Minimal U-Net noise predictor for single-channel images.

    NOTE(review): the timestep ``t`` is accepted for compatibility with
    the diffusion training/sampling loops but is not used for
    conditioning (there is no time embedding) — the network predicts
    noise from the image alone.
    """

    def __init__(self):
        super().__init__()
        # Encoder: 1 -> 32 -> 64 -> 128 channels.
        self.down1 = self._block(1, 32)
        self.down2 = self._block(32, 64)
        self.down3 = self._block(64, 128)

        # Bottleneck at 1/8 resolution.
        self.mid = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(256, 128, 3, padding=1),
            nn.ReLU()
        )

        # Decoder: each stage consumes [upsampled, skip] concatenated
        # along channels, hence the doubled input channel counts.
        self.up1 = self._block(256, 64)
        self.up2 = self._block(128, 32)
        self.up3 = nn.Conv2d(64, 1, 3, padding=1)

    def _block(self, in_ch, out_ch):
        """Two 3x3 conv + ReLU layers mapping in_ch -> out_ch channels."""
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, padding=1),
            nn.ReLU()
        )

    def forward(self, x, t):
        """Predict noise for x of shape (N, 1, H, W); returns same shape.

        Pooling/upsampling use the functional API instead of constructing
        nn.MaxPool2d/nn.Upsample modules on every call, and decoder sizes
        are taken from the skip tensors instead of being hard-coded for
        28x28 inputs — so any H, W >= 8 works and the output always
        matches the input resolution.
        """
        pool = nn.functional.max_pool2d
        resize = nn.functional.interpolate

        x1 = self.down1(x)
        x2 = self.down2(pool(x1, 2))
        x3 = self.down3(pool(x2, 2))

        m = self.mid(pool(x3, 2))

        # Upsample each decoder input to its skip's resolution, then
        # concatenate along channels.
        u1 = self.up1(torch.cat([resize(m, size=x3.shape[-2:]), x3], dim=1))
        u2 = self.up2(torch.cat([resize(u1, size=x2.shape[-2:]), x2], dim=1))
        u3 = self.up3(torch.cat([resize(u2, size=x1.shape[-2:]), x1], dim=1))

        # x1 has the input's spatial size, so no final resize is needed.
        return u3

class Diffusion:
    """DDPM utilities: linear beta schedule, forward noising, reverse step.

    Args:
        T: number of diffusion timesteps.
        beta_start, beta_end: endpoints of the linear variance schedule.
    """

    def __init__(self, T=1000, beta_start=1e-4, beta_end=0.02):
        self.T = T
        self.betas = torch.linspace(beta_start, beta_end, T)
        self.alphas = 1. - self.betas
        # alpha_bar_t = prod_{s<=t} alpha_s (cumulative signal retention).
        self.alpha_bars = torch.cumprod(self.alphas, dim=0)

    def forward_process(self, x0, t):
        """Sample x_t ~ q(x_t | x_0) in closed form.

        Args:
            x0: clean images (N, C, H, W).
            t: integer timesteps (N,); may live on any device.

        Returns:
            (xt, noise): the noised batch and the Gaussian noise used.
        """
        noise = torch.randn_like(x0)
        # Index on x0's device: the schedule tensors are created on CPU,
        # so indexing them with a CUDA `t` (as the training loop does)
        # would otherwise raise a device-mismatch error.
        alpha_bar = self.alpha_bars.to(x0.device)[t].view(-1, 1, 1, 1)
        xt = torch.sqrt(alpha_bar) * x0 + torch.sqrt(1 - alpha_bar) * noise
        return xt, noise

    def reverse_process(self, model, xt, t):
        """One DDPM ancestral sampling step: draw x_{t-1} from p(x_{t-1} | x_t).

        The previous implementation returned the raw model output, but the
        model is trained to predict *noise*, not the denoised image, so
        sampling produced meaningless results. This applies the standard
        DDPM posterior mean, adding fresh noise only while t > 0.
        """
        device = xt.device
        beta = self.betas.to(device)[t].view(-1, 1, 1, 1)
        alpha = self.alphas.to(device)[t].view(-1, 1, 1, 1)
        alpha_bar = self.alpha_bars.to(device)[t].view(-1, 1, 1, 1)

        pred_noise = model(xt, t)
        mean = (xt - beta / torch.sqrt(1 - alpha_bar) * pred_noise) / torch.sqrt(alpha)

        # The final step (t == 0) is deterministic: no noise is added.
        mask = (t > 0).float().view(-1, 1, 1, 1)
        return mean + mask * torch.sqrt(beta) * torch.randn_like(xt)

def train_diffusion(resume=True, epochs=20, save_interval=10):
    """Train the noise-prediction U-Net on MNIST with the DDPM objective.

    Args:
        resume: if True, try to warm-start from the best checkpoint.
        epochs: number of passes over the training set.
        save_interval: generate and save a sample image every this many epochs.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    start_time = time.time()

    # One checkpoint path for both saving and resuming. The original code
    # saved to 'best_7_model.pth' but tried to resume from
    # 'best_model.pth', so resuming never actually found the checkpoint.
    checkpoint_path = 'best_model.pth'

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))  # scale pixels to [-1, 1]
    ])

    dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
    dataloader = DataLoader(dataset, batch_size=64, shuffle=True)

    model = SimpleUNet().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    criterion = nn.MSELoss()  # construct once instead of per batch
    diffusion = Diffusion()

    if resume:
        try:
            # map_location lets a GPU-trained checkpoint load on CPU.
            model.load_state_dict(torch.load(checkpoint_path, map_location=device))
            print("成功加载最优模型，继续训练...")
        except FileNotFoundError:
            # Narrow except: a missing checkpoint is expected on a fresh
            # run; anything else (corrupt file, shape mismatch) should
            # surface instead of being silently swallowed.
            print("未找到最优模型，从头开始训练...")

    best_loss = float('inf')

    for epoch in range(epochs):
        epoch_loss = 0.0
        model.train()

        for i, (x0, _) in enumerate(dataloader):
            x0 = x0.to(device)
            # Uniform random timestep per image in the batch.
            t = torch.randint(0, diffusion.T, (x0.size(0),), device=device)

            xt, noise = diffusion.forward_process(x0, t)
            pred_noise = model(xt, t)

            loss = criterion(pred_noise, noise)
            epoch_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Log every 100 batches; the original `i % 1 == 0` printed on
            # every single batch, which is pure console noise.
            if i % 100 == 0:
                print(f"-- Epoch {epoch}, Batch {i}, Loss: {loss.item():.4f}")

        avg_loss = epoch_loss / len(dataloader)
        if avg_loss < best_loss:
            best_loss = avg_loss
            torch.save(model.state_dict(), checkpoint_path)
            print(f"保存最优模型，当前最佳loss: {best_loss:.4f}")

        if epoch % save_interval == 0:
            model.eval()  # inference mode for sampling
            with torch.no_grad():
                sample = torch.randn(1, 1, 28, 28, device=device)
                # Coarse sampling: only every 10th timestep, from high to low.
                # `step` avoids shadowing the training-loop variable `t`.
                for step in reversed(range(0, diffusion.T, 10)):
                    t_tensor = torch.tensor([step], device=device)
                    sample = diffusion.reverse_process(model, sample, t_tensor)
                plt.imshow(sample.cpu().squeeze(), cmap='gray')
                plt.savefig(f"sample_epoch{epoch}.png")
                plt.close()  # avoid accumulating open figures across epochs

    total_time = time.time() - start_time
    print(f"训练完成，总耗时: {total_time//60:.0f}分{total_time%60:.2f}秒")

if __name__ == "__main__":
    # Script entry point: resume from the best checkpoint if one exists.
    train_diffusion(resume=True)
