import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import math
from einops import rearrange
from torch.optim.lr_scheduler import CosineAnnealingLR
from torchvision.models import resnet18

# Fix the random seeds so runs are reproducible.
torch.manual_seed(42)
np.random.seed(42)


# 1. 数据预处理和加载
def load_data():
    """Build the MNIST train/test DataLoaders.

    Preprocessing pipeline:
      1. resize to 32x32 (the ViT expects a fixed input size),
      2. convert to a tensor in [0, 1],
      3. normalize with the standard MNIST mean/std.

    Returns:
        (train_loader, test_loader): DataLoaders with batch size 128.
    """
    transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])

    train_set = datasets.MNIST(
        root='./data', train=True, download=True, transform=transform)
    test_set = datasets.MNIST(
        root='./data', train=False, download=True, transform=transform)

    # Loader settings shared by both splits; pinning and extra workers
    # only pay off when a GPU is present.
    use_cuda = torch.cuda.is_available()
    loader_kwargs = dict(
        batch_size=128,
        num_workers=4 if use_cuda else 2,
        pin_memory=use_cuda,
    )

    train_loader = DataLoader(train_set, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_set, shuffle=False, **loader_kwargs)
    return train_loader, test_loader


# 2. Vision Transformer (ViT) 实现
class ViT(nn.Module):
    """Vision Transformer for (grayscale) image classification.

    Args:
        image_size: input image side length (default 32).
        patch_size: side length of each square patch (default 8).
        num_classes: number of output classes (10 for MNIST).
        dim: Transformer hidden dimension (default 256).
        depth: number of Transformer encoder layers (default 6).
        heads: number of attention heads (default 8).
        mlp_dim: hidden dimension of the feed-forward network (default 512).
        dropout: dropout probability inside the Transformer (default 0.1).
        emb_dropout: dropout applied after patch+position embedding (default 0.1).
    """

    def __init__(
            self,
            image_size=32,
            patch_size=8,
            num_classes=10,
            dim=256,
            depth=6,
            heads=8,
            mlp_dim=512,
            dropout=0.1,
            emb_dropout=0.1
    ):
        super().__init__()

        # Number of non-overlapping patches per image.
        num_patches = (image_size // patch_size) ** 2

        # 1. Patch embedding: a strided conv both splits the image into
        #    patches and projects each patch to `dim` in one operation.
        self.patch_size = patch_size
        self.patch_embedding = nn.Conv2d(1, dim, kernel_size=patch_size, stride=patch_size)

        # 2. Learnable position embedding (+1 slot for the class token).
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))

        # 3. Class token used for classification.
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))

        # 4. Embedding dropout.
        self.dropout = nn.Dropout(emb_dropout)

        # 5. Transformer encoder stack.
        self.transformer = TransformerEncoder(dim, depth, heads, mlp_dim, dropout)

        # 6. Classification head (LayerNorm + linear).
        self.layer_norm = nn.LayerNorm(dim)
        self.classifier = nn.Linear(dim, num_classes)

        # 7. Initialize weights.
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize conv, linear, and embedding parameters."""
        # Conv (patch embedding) initialization.
        nn.init.kaiming_normal_(self.patch_embedding.weight, mode='fan_out', nonlinearity='relu')
        if self.patch_embedding.bias is not None:
            nn.init.constant_(self.patch_embedding.bias, 0)

        # Classifier initialization.
        nn.init.normal_(self.classifier.weight, std=0.02)
        nn.init.constant_(self.classifier.bias, 0)

        # Position embedding and class token (truncated-normal-style std).
        nn.init.normal_(self.pos_embedding, std=0.02)
        nn.init.normal_(self.cls_token, std=0.02)

    def forward(self, img):
        """Forward pass.

        Steps:
        1. patch embedding
        2. prepend the class token
        3. add position embeddings
        4. Transformer encoder
        5. take the class-token output
        6. classification head

        Args:
            img: tensor of shape (batch, 1, image_size, image_size).

        Returns:
            Logits of shape (batch, num_classes).
        """
        b = img.shape[0]

        # 1. Patch embedding -> (batch, dim, num_patches_h, num_patches_w).
        x = self.patch_embedding(img)

        # 2. Flatten into a sequence -> (batch, num_patches, dim).
        x = x.flatten(2).transpose(1, 2)

        # 3. Prepend the class token along the sequence dimension (dim=1).
        #    (The original comment claimed dim 0, which was wrong.)
        cls_tokens = self.cls_token.expand(b, -1, -1)  # (b, 1, dim)
        x = torch.cat((cls_tokens, x), dim=1)

        # 4. Add position embeddings (out-of-place add; the original used
        #    an in-place += on the concatenation result).
        x = x + self.pos_embedding[:, :x.size(1)]
        x = self.dropout(x)

        # 5. Transformer encoder.
        x = self.transformer(x)

        # 6. Class-token output (first token of the sequence).
        x = x[:, 0]

        # 7. LayerNorm + classification head.
        x = self.layer_norm(x)
        x = self.classifier(x)

        return x


class TransformerEncoder(nn.Module):
    """Stack of identical Transformer encoder layers applied in sequence.

    Args:
        dim: hidden dimension.
        depth: number of encoder layers.
        heads: number of attention heads per layer.
        mlp_dim: hidden dimension of each layer's feed-forward network.
        dropout: dropout probability.
    """

    def __init__(self, dim, depth, heads, mlp_dim, dropout=0.1):
        super().__init__()
        blocks = [
            TransformerEncoderLayer(dim, heads, mlp_dim, dropout)
            for _ in range(depth)
        ]
        self.layers = nn.ModuleList(blocks)

    def forward(self, x):
        """Run the input through every encoder layer, in order."""
        for block in self.layers:
            x = block(x)
        return x


class TransformerEncoderLayer(nn.Module):
    """A single pre-norm Transformer encoder layer.

    Two residual sub-layers, each shaped LayerNorm -> sublayer -> dropout:
      1. multi-head self-attention
      2. position-wise feed-forward network
    """

    def __init__(self, dim, heads, mlp_dim, dropout=0.1):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = MultiHeadAttention(dim, heads, dropout)
        self.dropout1 = nn.Dropout(dropout)

        self.norm2 = nn.LayerNorm(dim)
        self.mlp = FeedForward(dim, mlp_dim, dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x):
        # Attention sub-layer with residual connection.
        x = x + self.dropout1(self.attn(self.norm1(x)))
        # Feed-forward sub-layer with residual connection.
        x = x + self.dropout2(self.mlp(self.norm2(x)))
        return x


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Args:
        dim: input/output (model) dimension.
        heads: number of attention heads; must divide `dim` evenly.
        dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, dim, heads=8, dropout=0.1):
        super().__init__()
        assert dim % heads == 0, "dim必须是heads的整数倍"

        self.dim = dim
        self.heads = heads
        self.head_dim = dim // heads
        # BUG FIX: scaled dot-product attention scales by the per-head
        # dimension (d_head ** -0.5), not the full model dimension as the
        # original `dim ** -0.5` did.
        self.scale = self.head_dim ** -0.5

        # Fused Q, K, V projection.
        self.to_qkv = nn.Linear(dim, dim * 3, bias=False)

        # Output projection.
        self.to_out = nn.Linear(dim, dim)

        # Attention-weight dropout.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply self-attention to `x` of shape (batch, seq_len, dim)."""
        batch_size, seq_len, _ = x.shape

        # 1. Project and split into Q, K, V, each (b, heads, n, head_dim).
        #    Plain view/transpose replaces the original einops.rearrange,
        #    removing the third-party dependency with identical semantics.
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = (
            t.view(batch_size, seq_len, self.heads, self.head_dim).transpose(1, 2)
            for t in qkv
        )

        # 2. Attention scores Q·K^T, scaled: (b, heads, n, n).
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale

        # 3. Softmax over keys, then dropout on the weights.
        attn_weights = self.dropout(attn_scores.softmax(dim=-1))

        # 4. Weighted sum of values: (b, heads, n, head_dim).
        out = torch.matmul(attn_weights, v)

        # 5. Merge heads back: (b, n, dim).
        out = out.transpose(1, 2).reshape(batch_size, seq_len, self.dim)

        # 6. Output projection.
        return self.to_out(out)


class FeedForward(nn.Module):
    """Position-wise feed-forward network used inside each encoder layer.

    Pipeline: Linear -> GELU -> Dropout -> Linear -> Dropout.

    Args:
        dim: input/output dimension.
        hidden_dim: width of the hidden layer.
        dropout: dropout probability.
    """

    def __init__(self, dim, hidden_dim, dropout=0.1):
        super().__init__()
        stages = [
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the two-layer MLP token-wise."""
        return self.net(x)


# 3. 经典网络 (ResNet-18) 作为比较
class ResNetMNIST(nn.Module):
    """ResNet-18 adapted to MNIST.

    Adaptations versus the stock torchvision model:
      1. the stem conv takes 1 input channel (grayscale) and uses a
         3x3 kernel with stride 1 in place of the original 7x7 conv;
      2. the final fully-connected layer outputs `num_classes` logits;
      3. adaptive average pooling to 1x1 before the classifier.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Randomly initialized ResNet-18 backbone.
        backbone = resnet18(pretrained=False)

        # Small-image-friendly stem: 1 channel, 3x3 kernel, stride 1.
        backbone.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)

        # Replace the classification head.
        backbone.fc = nn.Linear(backbone.fc.in_features, num_classes)

        # Fixed-size feature vector regardless of spatial input size.
        backbone.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self.model = backbone

    def forward(self, x):
        """Delegate to the adapted backbone."""
        return self.model(x)


# 4. 训练函数 (含L2正则化)
def train(model, device, train_loader, optimizer, epoch, l2_lambda=0.0001):
    """Train `model` for one epoch.

    Args:
        model: network to train (already moved to `device`).
        device: torch.device the batches are moved to.
        train_loader: iterable yielding (data, target) batches.
        optimizer: optimizer stepping `model`'s parameters.
        epoch: current epoch number (used only for logging).
        l2_lambda: coefficient of the explicit L2 penalty; pass 0 to rely
            solely on the optimizer's own `weight_decay`.

    Returns:
        (avg_loss, accuracy): mean per-sample loss and accuracy in percent.
    """
    model.train()
    # Hoisted out of the batch loop: no need to rebuild the criterion.
    criterion = nn.CrossEntropyLoss()
    total_loss = 0.0
    correct = 0
    total_samples = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()

        # Forward pass.
        output = model(data)
        loss = criterion(output, target)

        # Explicit L2 regularization: lambda * sum(||w||^2).
        # BUG FIX: the original summed unsquared norms (torch.norm(p) is
        # ||p||, not ||p||^2), which is not the L2 / weight-decay penalty.
        if l2_lambda > 0:
            l2_reg = torch.zeros((), device=device)
            for param in model.parameters():
                if param.requires_grad:
                    l2_reg = l2_reg + param.pow(2).sum()
            loss = loss + l2_lambda * l2_reg

        # Backward pass.
        loss.backward()

        # Gradient clipping for training stability.
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

        optimizer.step()

        # Accumulate per-sample loss and accuracy statistics.
        total_loss += loss.item() * data.size(0)
        _, predicted = output.max(1)
        batch_correct = predicted.eq(target).sum().item()
        correct += batch_correct
        total_samples += data.size(0)

        # Log progress every 200 batches (reuse the computed batch count).
        if batch_idx % 200 == 0:
            batch_acc = 100. * batch_correct / data.size(0)
            print(f'Epoch: {epoch} | Batch: {batch_idx}/{len(train_loader)} | '
                  f'Loss: {loss.item():.4f} | Acc: {batch_acc:.2f}%')

    avg_loss = total_loss / total_samples
    accuracy = 100. * correct / total_samples
    return avg_loss, accuracy


# 5. 测试函数
def test(model, device, test_loader):
    model.eval()
    total_loss = 0
    correct = 0
    total_samples = len(test_loader.dataset)

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total_loss += nn.CrossEntropyLoss(reduction='sum')(output, target).item()
            _, predicted = output.max(1)
            correct += predicted.eq(target).sum().item()

    # 计算平均损失和准确率
    avg_loss = total_loss / total_samples
    accuracy = 100. * correct / total_samples
    return avg_loss, accuracy


# 6. 绘制训练曲线
def plot_training_curves(vit_stats, resnet_stats, save_path="results"):
    """Plot loss and accuracy curves for both models and save the figure.

    Args:
        vit_stats: dict with 'train_loss', 'test_loss', 'train_acc',
            'test_acc' lists (one entry per epoch) for the ViT model.
        resnet_stats: same structure for the ResNet model.
        save_path: output directory; created if it does not exist.
    """
    plt.figure(figsize=(14, 6))

    # Loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(vit_stats['train_loss'], 'b-', label='ViT Train')
    plt.plot(vit_stats['test_loss'], 'b--', label='ViT Test')
    plt.plot(resnet_stats['train_loss'], 'r-', label='ResNet Train')
    plt.plot(resnet_stats['test_loss'], 'r--', label='ResNet Test')
    plt.title('Training and Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Accuracy curves.
    plt.subplot(1, 2, 2)
    plt.plot(vit_stats['train_acc'], 'b-', label='ViT Train')
    plt.plot(vit_stats['test_acc'], 'b--', label='ViT Test')
    plt.plot(resnet_stats['train_acc'], 'r-', label='ResNet Train')
    plt.plot(resnet_stats['test_acc'], 'r--', label='ResNet Test')
    plt.title('Training and Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()

    # Save the figure. exist_ok avoids the check-then-create race of the
    # original os.path.exists / os.makedirs pair.
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, 'training_curves.png'), dpi=300)
    plt.close()
    print(f"训练曲线已保存到 {save_path}/training_curves.png")


# 7. 主训练函数
def train_and_evaluate():
    """Train a ViT and a ResNet-18 on MNIST side by side and compare them.

    Builds both models, trains each for `epochs` epochs with its own
    optimizer and cosine LR schedule, records per-epoch statistics,
    plots the curves, and prints a final accuracy comparison.
    """
    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Load data.
    print("加载数据中...")
    train_loader, test_loader = load_data()

    # Build the models.
    vit = ViT(
        image_size=32,
        patch_size=8,
        num_classes=10,
        dim=192,
        depth=6,
        heads=8,
        mlp_dim=384,
        dropout=0.1,
        emb_dropout=0.1
    ).to(device)

    resnet = ResNetMNIST(num_classes=10).to(device)

    models = {
        "ViT": vit,
        "ResNet": resnet
    }

    def count_parameters(model):
        # Trainable parameters only.
        return sum(p.numel() for p in model.parameters() if p.requires_grad)

    print(f"ViT参数量: {count_parameters(models['ViT']):,}")
    print(f"ResNet参数量: {count_parameters(models['ResNet']):,}")

    # Training length — defined before the schedulers so the cosine
    # annealing can span the full run.
    epochs = 20

    # Per-model optimizers.
    optimizers = {
        "ViT": optim.AdamW(models["ViT"].parameters(), lr=3e-4, weight_decay=0.05),
        "ResNet": optim.Adam(models["ResNet"].parameters(), lr=1e-3, weight_decay=1e-4)
    }

    # LR schedulers. BUG FIX: T_max was hard-coded to 10 while training
    # runs for 20 epochs, so the LR bottomed out halfway and then rose
    # again; anneal over the full run instead.
    schedulers = {
        "ViT": CosineAnnealingLR(optimizers["ViT"], T_max=epochs, eta_min=1e-5),
        "ResNet": CosineAnnealingLR(optimizers["ResNet"], T_max=epochs, eta_min=1e-5)
    }

    print(f"开始训练，共{epochs}个epoch...")

    # Per-epoch statistics for both models.
    stats = {
        "ViT": {'train_loss': [], 'test_loss': [], 'train_acc': [], 'test_acc': []},
        "ResNet": {'train_loss': [], 'test_loss': [], 'train_acc': [], 'test_acc': []}
    }

    # Training loop.
    start_time = time.time()

    for epoch in range(1, epochs + 1):
        epoch_start = time.time()
        print(f"\nEpoch {epoch}/{epochs}:")

        for name in models.keys():
            model = models[name]
            optimizer = optimizers[name]

            # Train one epoch, then evaluate on the test set.
            train_loss, train_acc = train(
                model, device, train_loader, optimizer, epoch
            )
            test_loss, test_acc = test(model, device, test_loader)

            # Advance the learning-rate schedule.
            schedulers[name].step()

            # Record statistics.
            stats[name]['train_loss'].append(train_loss)
            stats[name]['train_acc'].append(train_acc)
            stats[name]['test_loss'].append(test_loss)
            stats[name]['test_acc'].append(test_acc)

            print(f"  {name}模型:")
            print(f"    训练损失: {train_loss:.4f}, 测试损失: {test_loss:.4f}")
            print(f"    训练准确率: {train_acc:.2f}%, 测试准确率: {test_acc:.2f}%")

        epoch_time = time.time() - epoch_start
        total_time = time.time() - start_time
        print(f"  本epoch耗时: {epoch_time:.1f}秒 | 累计时间: {total_time / 60:.1f}分钟")
        print("-" * 80)

    # Plot training curves.
    print("\n绘制训练曲线中...")
    plot_training_curves(stats["ViT"], stats["ResNet"])

    # Final comparison.
    print("\n最终性能比较:")
    for name in models.keys():
        best_test_acc = max(stats[name]['test_acc'])
        final_test_acc = stats[name]['test_acc'][-1]
        print(f"  {name}模型 - 最高测试准确率: {best_test_acc:.2f}%")
        print(f"  {name}模型 - 最终测试准确率: {final_test_acc:.2f}%")


if __name__ == '__main__':
    # Entry point: run the full ViT-vs-ResNet comparison and report
    # total wall-clock time.
    train_start = time.time()
    train_and_evaluate()
    total_time = time.time() - train_start
    print(f"\n总训练时间: {total_time / 60:.1f}分钟")