import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import ssl

# Disable SSL certificate verification so the MNIST download works behind
# broken certificate chains.
ssl._create_default_https_context = ssl._create_unverified_context

# Select the best available compute backend: CUDA GPU, then Apple MPS, then CPU.
if torch.cuda.is_available():
    _backend = 'cuda'
elif torch.backends.mps.is_built() and torch.backends.mps.is_available():
    _backend = 'mps'
else:
    _backend = 'cpu'
device = torch.device(_backend)
print(f"使用设备: {device}")

# Hyperparameters
input_size = 784       # flattened 28x28 image
hidden_size = 128      # width of each hidden layer
num_classes = 10       # digits 0-9
num_epochs = 5         # training epochs
batch_size = 100       # mini-batch size
learning_rate = 0.001  # Adam step size

# Preprocessing pipeline: ToTensor scales pixels from [0, 255] to [0, 1],
# then Normalize standardizes with the canonical MNIST mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# MNIST train/test splits, both run through the shared preprocessing pipeline.
train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(
    root='./data', train=False, transform=transform)

# Loaders: reshuffle the training stream every epoch; keep test order fixed
# so evaluation is deterministic.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False)

# Display a normalized image tensor as a grayscale figure.
def imshow(img):
    """Show a (C, H, W) image tensor that was normalized with the MNIST
    statistics used in `transform`.

    Args:
        img: CPU tensor, e.g. a single image or a make_grid() mosaic.
    """
    # Invert transforms.Normalize((0.1307,), (0.3081,)):
    # x_norm = (x - mean) / std  =>  x = x_norm * std + mean.
    # Bug fix: the original `img / 2 + 0.5` assumed a (0.5, 0.5)
    # normalization and rendered incorrectly-scaled images.
    img = img * 0.3081 + 0.1307
    npimg = img.numpy()
    # Move the channel axis last for matplotlib's (H, W, C) convention.
    plt.imshow(np.transpose(npimg, (1, 2, 0)), cmap='gray')
    plt.show()

# Pull one random batch of training images and preview the first four.
sample_images, sample_labels = next(iter(train_loader))

print("显示一些训练样本:")
imshow(torchvision.utils.make_grid(sample_images[:4]))
print('标签: ', ' '.join(f'{sample_labels[j]}' for j in range(4)))

# Fully-connected classifier: input -> hidden -> hidden -> logits.
class NeuralNet(nn.Module):
    """Three-layer MLP for MNIST digit classification.

    Args:
        input_size: number of input features (flattened image size).
        hidden_size: width of both hidden layers.
        num_classes: number of output logits.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, num_classes)
        # Dropout between layers to reduce overfitting; inactive in eval mode.
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        """Return raw class logits for a (batch, input_size) tensor."""
        hidden = self.dropout(self.relu(self.fc1(x)))
        hidden = self.dropout(self.relu(self.fc2(hidden)))
        return self.fc3(hidden)

# Build the network and place it on the selected device.
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
print("\n模型结构:")
print(model)

# Adam adapts per-parameter step sizes; cross-entropy is the standard
# multi-class classification loss.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()

# ---- Training ----
print("\n开始训练...")
total_step = len(train_loader)
loss_list = []  # per-step training loss, for plotting
acc_list = []   # test accuracy sampled every 100 steps

for epoch in range(num_epochs):
    # Bug fix: the model was never put in training mode; make dropout
    # explicitly active while optimizing.
    model.train()
    for i, (images, labels) in enumerate(train_loader):
        # Flatten 28x28 images to 784-vectors and move batch to the device.
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        loss_list.append(loss.item())

        if (i+1) % 100 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{total_step}], Loss: {loss.item():.4f}')

            # Periodic test-set evaluation.  Bug fixes: switch to eval mode
            # so dropout is disabled (accuracy was previously measured with
            # dropout active), and use distinct loop variables so the
            # training batch `images`/`labels` are not shadowed.
            model.eval()
            with torch.no_grad():
                correct = 0
                total = 0
                for eval_images, eval_labels in test_loader:
                    eval_images = eval_images.reshape(-1, 28*28).to(device)
                    eval_labels = eval_labels.to(device)
                    eval_outputs = model(eval_images)
                    _, predicted = torch.max(eval_outputs, 1)
                    total += eval_labels.size(0)
                    correct += (predicted == eval_labels).sum().item()

                accuracy = 100 * correct / total
                acc_list.append(accuracy)
                print(f'测试集准确率: {accuracy:.2f}%')
            # Restore training mode for the next optimization steps.
            model.train()

# ---- Final evaluation over the full test set ----
print("\n开始测试...")
model.eval()  # disable dropout for inference
correct = 0
total = 0
with torch.no_grad():
    for batch_images, batch_labels in test_loader:
        batch_images = batch_images.reshape(-1, 28*28).to(device)
        batch_labels = batch_labels.to(device)
        logits = model(batch_images)
        predicted = logits.argmax(dim=1)
        total += batch_labels.size(0)
        correct += (predicted == batch_labels).sum().item()

print(f'最终测试准确率: {100 * correct / total:.2f}%')

# ---- Persist the trained weights ----
torch.save(model.state_dict(), 'mnist_model.pth')
print("\n模型已保存为'mnist_model.pth'")

# Export a TorchScript version for C++ inference.  Tracing is done on the
# CPU with a representative example input matching the model's 1x784 input.
model.to('cpu')  # Module.to() moves the model in place
model.eval()
example_input = torch.randn(1, 784)
traced_script_module = torch.jit.trace(model, example_input)
traced_script_module.save('mnist_model_scripted.pt')
print("TorchScript 模型已保存为 'mnist_model_scripted.pt'")

# Bug fix: Module.to('cpu') mutated `model` in place, so the prediction code
# below fed `device` tensors to a CPU model (crash on CUDA/MPS).  Move the
# model back to the selected device.
model.to(device)

# ---- Show predictions for a handful of test images ----
print("\n显示预测结果:")
model.eval()
with torch.no_grad():
    sample_images, sample_labels = next(iter(test_loader))
    flat = sample_images.reshape(-1, 28*28).to(device)
    logits = model(flat)
    _, predicted = torch.max(logits, 1)

    print('真实标签: ', ' '.join(f'{sample_labels[j]}' for j in range(12)))
    print('预测标签: ', ' '.join(f'{predicted[j]}' for j in range(12)))

    # Show the original (unflattened) images alongside the printed labels.
    imshow(torchvision.utils.make_grid(sample_images[:12]))

# ---- Training loss and test accuracy curves ----
fig = plt.figure(figsize=(12, 4))

ax = fig.add_subplot(1, 2, 1)
ax.plot(loss_list)
ax.set_title('训练损失')
ax.set_xlabel('步骤')
ax.set_ylabel('损失')

ax = fig.add_subplot(1, 2, 2)
ax.plot(acc_list)
ax.set_title('测试准确率')
ax.set_xlabel('评估次数')
ax.set_ylabel('准确率 (%)')

plt.show()

print("\n训练完成!")