import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.cuda.amp import autocast, GradScaler  # 混合精度训练模块
import os
import yaml_operator

# ====================== Hyperparameters ======================
batch_size = 128  # larger batches improve GPU utilization (needs VRAM; 16GB can try 256-512)
learning_rate = 0.1  # typical high initial LR for SGD; momentum accelerates convergence
num_epochs = 100  # total number of training epochs
image_size = 224  # input image side length
patch_size = 16  # patch side length (core ViT parameter)
num_classes = 10  # number of output classes (CIFAR10 has 10)
dim = 512  # token embedding dimension
depth = 6  # number of Transformer blocks (model depth)
heads = 8  # number of attention heads
mlp_dim = 3072  # hidden width of the feed-forward network
yaml_file_name = 'epoch.yaml'  # YAML file tracking completed epochs (resume support)
model_path = 'vit_model.pth'  # checkpoint path for model weights

# ====================== Data preprocessing ======================
# Training-set augmentation (CutOut / MixUp could be layered on top later).
from torchvision.transforms import ColorJitter

# ImageNet channel statistics, shared by both pipelines.
_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

trans_train = transforms.Compose([
	transforms.Resize((image_size, image_size)),   # rescale to the model input size
	transforms.RandomCrop(image_size, padding=4),  # random shift via padded crop
	transforms.RandomHorizontalFlip(),             # mirror augmentation
	ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),  # color jitter
	transforms.ToTensor(),
	_normalize,
])

# Evaluation pipeline: deterministic resize + normalize only, no augmentation.
trans_valid = transforms.Compose([
	transforms.Resize((image_size, image_size)),
	transforms.ToTensor(),
	_normalize,
])

# ====================== Dataset loading ======================
_cifar_kwargs = dict(root='./data', download=True)
train_dataset = torchvision.datasets.CIFAR10(train=True, transform=trans_train, **_cifar_kwargs)
test_dataset = torchvision.datasets.CIFAR10(train=False, transform=trans_valid, **_cifar_kwargs)

# Shuffle only the training stream; pin_memory speeds up host-to-GPU copies.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, pin_memory=True)


# ====================== Core modules ======================
# Multi-head self-attention module
class Attention(nn.Module):
	"""Multi-head self-attention over a sequence of token embeddings."""

	def __init__(self, dim, heads=8, dim_head=64):
		super().__init__()
		inner_dim = dim_head * heads
		self.heads = heads
		# Scale dot products by 1/sqrt(d_head) to keep softmax gradients stable.
		self.scale = dim_head ** -0.5
		# One projection produces Q, K and V in a single matmul.
		self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
		self.to_out = nn.Linear(inner_dim, dim)

	def forward(self, x):
		batch, seq_len, _ = x.shape
		num_heads = self.heads

		def split_heads(t):
			# (b, n, h*d) -> (b, h, n, d)
			return t.reshape(batch, seq_len, num_heads, -1).transpose(1, 2)

		q, k, v = (split_heads(t) for t in self.to_qkv(x).chunk(3, dim=-1))
		scores = (q @ k.transpose(-1, -2)) * self.scale
		weights = scores.softmax(dim=-1)
		# Merge heads back: (b, h, n, d) -> (b, n, h*d).
		merged = (weights @ v).transpose(1, 2).reshape(batch, seq_len, -1)
		return self.to_out(merged)


# Feed-forward module (key change vs. vanilla ViT: GELU replaced by Swish/SiLU)
class FeedForward(nn.Module):
	"""Position-wise MLP: expand to hidden_dim, gate with SiLU, project back."""

	def __init__(self, dim, hidden_dim):
		super().__init__()
		layers = [
			nn.Linear(dim, hidden_dim),
			nn.SiLU(),  # Swish activation — smooth, with stable gradients
			nn.Linear(hidden_dim, dim),
		]
		self.net = nn.Sequential(*layers)

	def forward(self, x):
		return self.net(x)


# Transformer encoder block (structure unchanged)
class TransformerBlock(nn.Module):
	"""Pre-norm encoder block: LayerNorm → attention → residual,
	then LayerNorm → feed-forward → residual."""

	def __init__(self, dim, heads, dim_head, mlp_dim):
		super().__init__()
		self.norm1 = nn.LayerNorm(dim)
		self.attn = Attention(dim, heads=heads, dim_head=dim_head)
		self.norm2 = nn.LayerNorm(dim)
		self.ff = FeedForward(dim, mlp_dim)

	def forward(self, x):
		# Residual around pre-normed attention, then around pre-normed MLP.
		attended = x + self.attn(self.norm1(x))
		return attended + self.ff(self.norm2(attended))


# ViT model (with explicit weight initialization added)
class ViT(nn.Module):
	"""Vision Transformer: cuts the image into flat patches, embeds them,
	prepends a learnable [CLS] token, adds positional embeddings, runs the
	token sequence through `depth` Transformer blocks, and classifies from
	the final [CLS] representation.
	"""

	def __init__(self, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim):
		super().__init__()
		assert image_size % patch_size == 0, "图像尺寸必须能被分块大小整除"
		num_patches = (image_size // patch_size) ** 2
		patch_dim = 3 * patch_size ** 2  # flattened RGB patch length: 3 * p * p

		self.patch_embedding = nn.Linear(patch_dim, dim)  # linear patch-to-token embedding
		self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))  # learned positions (+1 for [CLS])
		self.cls_token = nn.Parameter(torch.randn(1, 1, dim))  # classification token
		self.transformer = nn.ModuleList([  # stack of Transformer encoder blocks
			TransformerBlock(dim, heads, dim // heads, mlp_dim) for _ in range(depth)
		])
		self.dropout = nn.Dropout(0.1)  # dropout before the head (overfitting control; tunable)
		self.mlp_head = nn.Sequential(
			nn.LayerNorm(dim),
			nn.Linear(dim, num_classes)
		)
		self.patch_size = patch_size

		# Weight initialization (recursively applied to every submodule) —
		# speeds up convergence versus PyTorch defaults.
		self.apply(self._init_weights)

	def _init_weights(self, m):
		"""Xavier-init Linear layers; identity-init LayerNorm."""
		if isinstance(m, nn.Linear):
			nn.init.xavier_uniform_(m.weight)  # Xavier uniform suits the nonlinear activations used here
			if m.bias is not None:
				nn.init.zeros_(m.bias)
		elif isinstance(m, nn.LayerNorm):
			nn.init.zeros_(m.bias)
			nn.init.ones_(m.weight)

	def forward(self, x):
		b, c, h, w = x.shape
		p = self.patch_size
		# Rearrange (b, c, h, w) -> (b, num_patches, c*p*p): cut the image into
		# non-overlapping p×p patches and flatten each one, channels last.
		x = x.reshape(b, c, h // p, p, w // p, p).permute(0, 2, 4, 3, 5, 1).reshape(b, -1, c * p * p)
		x = self.patch_embedding(x)  # embed each flat patch into a dim-sized token

		cls_tokens = self.cls_token.expand(b, -1, -1)  # one [CLS] token per sample
		x = torch.cat([cls_tokens, x], dim=1) + self.pos_embedding  # prepend [CLS], add positional info

		for block in self.transformer:
			x = block(x)  # pass through each Transformer block in order

		x = self.dropout(x)  # apply dropout
		cls_token = x[:, 0]  # the [CLS] token summarizes the whole sequence
		return self.mlp_head(cls_token)  # logits over num_classes


# ====================== Model construction ======================
model = ViT(image_size, patch_size, num_classes, dim, depth, heads, mlp_dim)

# Resume from a previous checkpoint when one exists.
# map_location='cpu' is required here: without it, a checkpoint saved from a
# GPU run would fail to load (or try to deserialize straight onto CUDA) on a
# CPU-only machine — `device` is not even defined yet at this point.  The
# weights are moved to the right device later by model.to(device).
if os.path.exists(model_path):
	model.load_state_dict(torch.load(model_path, map_location='cpu'))
	print("加载已有模型继续训练...")

# ====================== Loss and optimizer ======================
criterion = nn.CrossEntropyLoss()  # standard cross-entropy for multi-class logits

# SGD with momentum instead of AdamW: high initial LR plus momentum=0.9
# accelerates convergence and damps oscillation; weight_decay adds L2
# regularization against overfitting.
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.01)

# Cosine annealing decays the LR smoothly over the whole run (T_max = total epochs).
scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs)

# ====================== Device and mixed precision ======================
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Gradient scaler for mixed-precision training (prevents FP16 underflow).
# Enable it only when CUDA is actually available: an unconditionally-enabled
# torch.cuda.amp.GradScaler on a CPU-only run emits a warning and does nothing
# useful; with enabled=False it degrades to a clean no-op pass-through.
scaler = GradScaler(enabled=torch.cuda.is_available())

# Number of epochs already completed (checkpoint-resume bookkeeping).
real_epoch_num = yaml_operator.read_yaml_number_param(yaml_file_name, 'epoch_num')

# ====================== Training loop ======================
for epoch in range(num_epochs):
	model.train()
	running_loss = 0.0  # loss accumulated since the last progress print
	correct = 0
	total = 0

	for i, (images, labels) in enumerate(train_loader):
		images = images.to(device, non_blocking=True)  # non-blocking host->GPU copy (pairs with pin_memory)
		labels = labels.to(device, non_blocking=True)

		optimizer.zero_grad(set_to_none=True)  # setting grads to None is cheaper than zero-filling

		# Mixed-precision forward pass: autocast selects FP16/FP32 per op,
		# reducing memory use and compute time on CUDA.
		with autocast():
			outputs = model(images)
			loss = criterion(outputs, labels)

		# Scaled backward pass guards against FP16 gradient underflow/overflow.
		scaler.scale(loss).backward()  # backprop on the scaled loss
		scaler.step(optimizer)  # unscale gradients, then apply the optimizer step
		scaler.update()  # adapt the scale factor dynamically

		running_loss += loss.item()
		_, predicted = torch.max(outputs.data, 1)
		total += labels.size(0)
		correct += (predicted == labels).sum().item()

		# Progress report every 100 steps; counters reset afterwards, so the
		# printed loss is the mean over the last 100 batches.
		if (i + 1) % 100 == 0:
			train_accuracy = 100 * correct / total
			print(
				f"第 [{epoch + 1 + real_epoch_num}/{num_epochs}] 轮，第 [{i + 1}/{len(train_loader)}] 步，"
				f"损失值: {running_loss / 100:.4f}，训练集精度: {train_accuracy:.2f}%"
			)
			running_loss = 0.0
			correct = 0
			total = 0

	# Checkpoint after every epoch and persist the cumulative epoch count.
	torch.save(model.state_dict(), model_path)
	print(f"第 [{epoch + 1 + real_epoch_num}/{num_epochs}] 轮模型已保存")
	yaml_operator.write_yaml_number_param(yaml_file_name, 'epoch_num', epoch + 1 + real_epoch_num)

	# ====================== Validation ======================
	model.eval()
	correct_val = 0
	total_val = 0
	with torch.no_grad():  # no gradients needed during evaluation
		for images, labels in test_loader:
			images, labels = images.to(device), labels.to(device)
			outputs = model(images)
			_, predicted = torch.max(outputs.data, 1)
			total_val += labels.size(0)
			correct_val += (predicted == labels).sum().item()

	val_accuracy = 100 * correct_val / total_val
	print(f"第 [{epoch + 1 + real_epoch_num}/{num_epochs}] 轮结束，验证集准确率: {val_accuracy:.2f}%\n")

	# Advance the cosine-annealing LR schedule once per epoch.
	scheduler.step()

# ====================== Final evaluation ======================
model.eval()
correct_test = 0
total_test = 0
with torch.no_grad():
	for images, labels in test_loader:
		images, labels = images.to(device), labels.to(device)
		# argmax over class logits yields the predicted label indices.
		preds = model(images).argmax(dim=1)
		total_test += labels.size(0)
		correct_test += (preds == labels).sum().item()

print(f"模型在 10000 张测试图像上的最终准确率: {100 * correct_test / total_test:.2f}%")