import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import math

# Device selection: prefer CUDA when available, fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# 1. Data loading and preprocessing
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))  # map pixel values to [-1, 1], the range the diffusion model trains on
])

train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST('./data', train=False, download=True, transform=transform)

# Larger eval batch is fine since evaluation runs under no_grad
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)

# 2. Diffusion model hyper-parameters and precomputed noise schedule
class DiffusionConfig:
    """Linear-beta DDPM noise schedule and all quantities derived from it.

    The schedule tensors are created directly on ``device``. This matters:
    elsewhere in this script they are indexed with timestep tensors that live
    on the training device (e.g. ``config.alphas[t]`` with ``t`` on CUDA), and
    indexing a CPU tensor with a CUDA index tensor raises a RuntimeError.

    Args:
        timesteps: number of diffusion steps T.
        beta_start: beta at t=0 (smallest noise increment).
        beta_end: beta at t=T-1 (largest noise increment).
        device: device for the schedule tensors; auto-detected when None,
            matching the script-level device selection.
    """

    def __init__(self, timesteps=1000, beta_start=0.0001, beta_end=0.02, device=None):
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.timesteps = timesteps
        self.beta_start = beta_start
        self.beta_end = beta_end

        # Linear beta schedule (Ho et al. 2020) and derived coefficients
        self.betas = torch.linspace(beta_start, beta_end, timesteps, device=device)
        self.alphas = 1. - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)  # \bar{alpha}_t
        # \bar{alpha}_{t-1}, with \bar{alpha}_{-1} defined as 1
        self.alphas_cumprod_prev = F.pad(self.alphas_cumprod[:-1], (1, 0), value=1.0)
        self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod)
        # Variance of the posterior q(x_{t-1} | x_t, x_0)
        self.posterior_variance = self.betas * (1. - self.alphas_cumprod_prev) / (1. - self.alphas_cumprod)

config = DiffusionConfig()

# 3. 定义条件UNet模型
class ConditionalUNet(nn.Module):
    def __init__(self, num_classes=10):
        super(ConditionalUNet, self).__init__()
        
        # 时间步嵌入
        self.time_embed = nn.Sequential(
            nn.Linear(128, 256),
            nn.SiLU(),
            nn.Linear(256, 256),
        )
        
        # 类别嵌入
        self.class_embed = nn.Embedding(num_classes, 256)
        
        # 编码器
        self.enc1 = nn.Sequential(
            nn.Conv2d(1, 64, 3, padding=1),
            nn.GroupNorm(8, 64),
            nn.SiLU(),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.GroupNorm(8, 64),
            nn.SiLU(),
        )
        self.enc2 = nn.Sequential(
            nn.Conv2d(64, 128, 3, stride=2, padding=1),
            nn.GroupNorm(8, 128),
            nn.SiLU(),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.GroupNorm(8, 128),
            nn.SiLU(),
        )
        self.enc3 = nn.Sequential(
            nn.Conv2d(128, 256, 3, stride=2, padding=1),
            nn.GroupNorm(8, 256),
            nn.SiLU(),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.GroupNorm(8, 256),
            nn.SiLU(),
        )
        
        # 中间层
        self.mid = nn.Sequential(
            nn.Conv2d(256, 256, 3, padding=1),
            nn.GroupNorm(8, 256),
            nn.SiLU(),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.GroupNorm(8, 256),
            nn.SiLU(),
        )
        
        # 解码器
        self.dec3 = nn.Sequential(
            nn.Conv2d(512, 128, 3, padding=1),
            nn.GroupNorm(8, 128),
            nn.SiLU(),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.GroupNorm(8, 128),
            nn.SiLU(),
        )
        self.dec2 = nn.Sequential(
            nn.Conv2d(256, 64, 3, padding=1),
            nn.GroupNorm(8, 64),
            nn.SiLU(),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.GroupNorm(8, 64),
            nn.SiLU(),
        )
        self.dec1 = nn.Sequential(
            nn.Conv2d(128, 64, 3, padding=1),
            nn.GroupNorm(8, 64),
            nn.SiLU(),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.GroupNorm(8, 64),
            nn.SiLU(),
        )
        
        self.final = nn.Conv2d(64, 1, 3, padding=1)
        
        # 上采样
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
        
    def forward(self, x, t, class_labels):
        # 时间步嵌入
        t_embed = self._get_timestep_embedding(t, 128)
        t_embed = self.time_embed(t_embed)
        
        # 类别嵌入
        class_embed = self.class_embed(class_labels)
        
        # 合并条件信息
        condition = t_embed + class_embed
        condition = condition.unsqueeze(-1).unsqueeze(-1)
        
        # 编码器路径
        x1 = self.enc1(x)
        x2 = self.enc2(x1)
        x3 = self.enc3(x2)
        
        # 中间层
        x_mid = self.mid(x3)
        
        # 解码器路径（添加条件信息）
        x = self.dec3(torch.cat([x_mid + condition.expand_as(x_mid), x3], dim=1))
        x = self.upsample(x)
        x = self.dec2(torch.cat([x + condition.expand_as(x), x2], dim=1))
        x = self.upsample(x)
        x = self.dec1(torch.cat([x + condition.expand_as(x), x1], dim=1))
        
        return self.final(x)
    
    def _get_timestep_embedding(self, timesteps, dim, max_period=10000):
        """
        创建正弦位置嵌入，与Transformer中的位置编码类似
        """
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=timesteps.device)
        
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
            
        return embedding

# 4. Diffusion process utilities
def q_sample(x_start, t, noise=None):
    """Forward diffusion: sample x_t ~ q(x_t | x_0) in closed form.

    Uses x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps, with per-sample
    timesteps ``t`` indexing the precomputed schedule in ``config``.
    """
    if noise is None:
        noise = torch.randn_like(x_start)

    # Gather per-sample coefficients and broadcast them over (C, H, W).
    signal_coef = config.sqrt_alphas_cumprod[t].view(-1, 1, 1, 1)
    noise_coef = config.sqrt_one_minus_alphas_cumprod[t].view(-1, 1, 1, 1)

    return signal_coef * x_start + noise_coef * noise

def p_losses(denoise_model, x_start, t, class_labels, noise=None):
    """Standard DDPM training objective: MSE between the injected noise and
    the model's prediction of it at timestep ``t``.
    """
    noise = torch.randn_like(x_start) if noise is None else noise

    # Noise the clean batch to step t, then ask the model to recover the noise.
    x_noisy = q_sample(x_start=x_start, t=t, noise=noise)
    return F.mse_loss(noise, denoise_model(x_noisy, t, class_labels))

# 5. Classification by generation
def classify_with_diffusion(model, x, num_classes=10, num_samples=1, timesteps=50):
    """Classify ``x`` with a conditional diffusion model.

    For each candidate class, run the reverse (denoising) process
    ``num_samples`` times starting from fresh Gaussian noise, and pick the
    class whose generated images are closest to ``x`` in mean pixel MSE.
    (The original code accepted ``num_samples`` but never used it; it now
    averages the similarity over that many independent generations, and the
    default of 1 reproduces the original behavior exactly.)

    NOTE(review): the loop only walks the first ``timesteps`` (lowest-noise)
    steps of the full schedule even though it starts from pure noise, so the
    chain never begins at the true prior t=T — confirm this truncation is
    intended.

    Args:
        model: conditional noise-prediction network (x, t, labels) -> noise.
        x: input batch of shape [B, 1, H, W], normalized to [-1, 1].
        num_classes: number of candidate labels to try.
        num_samples: independent generations per class to average over.
        timesteps: number of reverse steps to run per generation.

    Returns:
        (predicted, similarities): predicted class indices [B], and the
        per-class mean MSE matrix [B, num_classes] (lower = more similar).
    """
    model.eval()

    with torch.no_grad():
        batch_size = x.shape[0]
        runs = max(1, num_samples)
        # similarities[b, c]: MSE between x[b] and images generated under
        # class c, accumulated over `runs` generations then averaged.
        similarities = torch.zeros(batch_size, num_classes, device=x.device)

        for class_label in range(num_classes):
            labels = torch.full((batch_size,), class_label, device=device)

            for _ in range(runs):
                # Start from pure noise and denoise step by step
                img = torch.randn_like(x)

                for i in range(timesteps - 1, -1, -1):
                    t = torch.full((batch_size,), i, device=device, dtype=torch.long)

                    # Predict the noise component at this step
                    predicted_noise = model(img, t, labels)

                    # Per-step schedule coefficients, broadcast over (C, H, W)
                    alpha = config.alphas[t].view(-1, 1, 1, 1)
                    alpha_cumprod = config.alphas_cumprod[t].view(-1, 1, 1, 1)
                    beta = config.betas[t].view(-1, 1, 1, 1)

                    # No noise is added on the final step (t = 0)
                    noise = torch.randn_like(img) if i > 0 else torch.zeros_like(img)

                    # DDPM update: posterior mean plus sigma_t * z
                    img = (1 / torch.sqrt(alpha)) * (
                        img - ((1 - alpha) / (torch.sqrt(1 - alpha_cumprod))) * predicted_noise
                    ) + torch.sqrt(beta) * noise

                # MSE similarity (lower is more similar) for this generation
                mse = F.mse_loss(img, x, reduction='none').mean(dim=(1, 2, 3))
                similarities[:, class_label] += mse

        similarities /= runs

        # Choose the class whose generations best match the input
        predicted = torch.argmin(similarities, dim=1)

        return predicted, similarities

# 6. Training loop
def train_diffusion_model():
    """Train the conditional UNet with the DDPM noise-prediction loss.

    Returns:
        (model, losses): the trained network and per-epoch average losses.
    """
    model = ConditionalUNet().to(device)
    optimizer = optim.AdamW(model.parameters(), lr=1e-4)

    epochs = 20
    losses = []

    for epoch in range(epochs):
        model.train()
        running_loss = 0.0
        bar = tqdm(train_loader, desc=f'Epoch {epoch+1}/{epochs}')

        for data, labels in bar:
            data, labels = data.to(device), labels.to(device)

            # Uniformly random timestep for every image in the batch
            t = torch.randint(0, config.timesteps, (data.shape[0],), device=device).long()

            loss = p_losses(model, data, t, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            bar.set_postfix({'Loss': f'{loss.item():.4f}'})

        avg_loss = running_loss / len(train_loader)
        losses.append(avg_loss)
        print(f'Epoch {epoch+1}, Average Loss: {avg_loss:.4f}')

        # Run the (expensive) generative classifier every 5 epochs
        if (epoch + 1) % 5 == 0:
            test_accuracy = evaluate_model(model)
            print(f'Test Accuracy after epoch {epoch+1}: {test_accuracy:.2f}%')

    return model, losses

# 7. Evaluation
def evaluate_model(model):
    """Return the test-set accuracy (%) of the diffusion-based classifier."""
    model.eval()
    correct = 0
    total = 0

    with torch.no_grad():
        for data, labels in test_loader:
            data, labels = data.to(device), labels.to(device)

            # Classify by conditional generation + nearest-MSE matching
            predictions, _ = classify_with_diffusion(model, data)

            total += labels.size(0)
            correct += predictions.eq(labels).sum().item()

    return 100. * correct / total

# 8. Visualize class-conditional samples
def visualize_generation(model, num_samples=5):
    """Generate ``num_samples`` digits per class with the full reverse process
    and plot them as a 10-row grid (one row per class).
    """
    model.eval()

    fig, axes = plt.subplots(10, num_samples, figsize=(num_samples * 2, 20))

    with torch.no_grad():
        for class_label in range(10):
            labels = torch.full((num_samples,), class_label, device=device)

            # Start from pure Gaussian noise and denoise through all T steps
            img = torch.randn(num_samples, 1, 28, 28, device=device)

            for step in range(config.timesteps - 1, -1, -1):
                t = torch.full((num_samples,), step, device=device, dtype=torch.long)
                predicted_noise = model(img, t, labels)

                alpha = config.alphas[t].view(-1, 1, 1, 1)
                alpha_cumprod = config.alphas_cumprod[t].view(-1, 1, 1, 1)
                beta = config.betas[t].view(-1, 1, 1, 1)

                # No stochastic term on the final step
                z = torch.randn_like(img) if step > 0 else torch.zeros_like(img)

                img = (1 / torch.sqrt(alpha)) * (
                    img - ((1 - alpha) / (torch.sqrt(1 - alpha_cumprod))) * predicted_noise
                ) + torch.sqrt(beta) * z

            # Map from the training range [-1, 1] back to [0, 1] for display
            img = (img.clamp(-1, 1) + 1) / 2

            for col in range(num_samples):
                ax = axes[class_label, col]
                ax.imshow(img[col][0].cpu(), cmap='gray')
                ax.axis('off')
                if col == 0:
                    ax.set_title(f'Class {class_label}')

    plt.tight_layout()
    plt.show()

# 9. Main training pipeline
# NOTE(review): this runs at import time; consider wrapping in
# `if __name__ == "__main__":` so the module can be imported without training.
print("开始训练扩散模型...")
model, losses = train_diffusion_model()

print("评估模型性能...")
final_accuracy = evaluate_model(model)
print(f"最终测试准确率: {final_accuracy:.2f}%")

# Plot the per-epoch training loss curve
plt.figure(figsize=(10, 5))
plt.plot(losses)
plt.title('扩散模型训练损失')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.grid(True)
plt.show()

# Visualize class-conditional generated samples
print("生成样本可视化...")
visualize_generation(model)

# 10. Baseline: compare against a conventional CNN classifier
def compare_with_traditional():
    """Train a small CNN for 5 epochs and print its test accuracy alongside
    the diffusion classifier's ``final_accuracy`` (module-level global).
    """
    class SimpleCNN(nn.Module):
        """Minimal two-conv CNN for 28x28 grayscale digits."""

        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(1, 32, 3, 1)
            self.conv2 = nn.Conv2d(32, 64, 3, 1)
            # 64 channels * 12 * 12 spatial = 9216 after two valid convs
            # (28->26->24) and a 2x2 max-pool (24->12)
            self.fc1 = nn.Linear(9216, 128)
            self.fc2 = nn.Linear(128, 10)

        def forward(self, x):
            x = F.relu(self.conv1(x))
            x = F.relu(self.conv2(x))
            x = F.max_pool2d(x, 2)
            x = torch.flatten(x, 1)
            x = F.relu(self.fc1(x))
            x = self.fc2(x)
            return F.log_softmax(x, dim=1)

    cnn_model = SimpleCNN().to(device)
    cnn_optimizer = optim.Adam(cnn_model.parameters(), lr=0.001)

    # Quick training run (5 epochs is enough for a strong MNIST baseline)
    cnn_model.train()
    for epoch in range(5):
        for data, labels in train_loader:
            data, labels = data.to(device), labels.to(device)
            cnn_optimizer.zero_grad()
            output = cnn_model(data)
            loss = F.nll_loss(output, labels)
            loss.backward()
            cnn_optimizer.step()

    # Evaluate the CNN on the test set
    cnn_model.eval()
    cnn_correct = 0
    cnn_total = 0
    with torch.no_grad():
        for data, labels in test_loader:
            data, labels = data.to(device), labels.to(device)
            outputs = cnn_model(data)
            # `.data` access removed: unnecessary (and discouraged) under no_grad
            _, predicted = torch.max(outputs, 1)
            cnn_total += labels.size(0)
            cnn_correct += (predicted == labels).sum().item()

    cnn_accuracy = 100 * cnn_correct / cnn_total
    print(f"传统CNN准确率: {cnn_accuracy:.2f}%")
    print(f"扩散模型准确率: {final_accuracy:.2f}%")

compare_with_traditional()  # baseline comparison; relies on `final_accuracy` computed above