import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.metrics import confusion_matrix
import seaborn as sns

# Seed every RNG in play (torch, numpy, stdlib random) so runs are reproducible.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Preprocessing: convert images to tensors, then normalize with the
# published MNIST per-channel mean and standard deviation.
MNIST_MEAN, MNIST_STD = (0.1307,), (0.3081,)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(MNIST_MEAN, MNIST_STD),
])

# Download (if needed) the train/test splits and wrap them in loaders;
# training shuffles each epoch, evaluation uses large fixed batches.
train_dataset = datasets.MNIST('data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST('data', train=False, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1000)

# 定义两层神经网络模型
class TwoLayerNet(nn.Module):
    """Minimal MLP for MNIST: 784 inputs -> 128 hidden (ReLU) -> 10 logits."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 128)  # input -> hidden
        self.fc2 = nn.Linear(128, 10)       # hidden -> class logits
        self.relu = nn.ReLU()

    def forward(self, x):
        # Flatten (N, 1, 28, 28) images into (N, 784) rows, then apply the
        # two affine layers with a ReLU between them. Raw logits are
        # returned; CrossEntropyLoss applies softmax internally.
        flat = x.view(-1, 28 * 28)
        hidden = self.relu(self.fc1(flat))
        return self.fc2(hidden)

# Build the network on the chosen device; optimize cross-entropy with Adam.
model = TwoLayerNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

def train(epoch):
    """Run one training epoch over train_loader.

    Returns a tuple (mean per-batch loss, accuracy in percent).
    Progress is printed every 100 batches.
    """
    model.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        n_seen += target.size(0)
        n_correct += (output.argmax(dim=1) == target).sum().item()

        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} '
                  f'({100. * batch_idx / len(train_loader):.0f}%)]\t'
                  f'Loss: {loss.item():.6f}\t'
                  f'Accuracy: {100. * n_correct / n_seen:.2f}%')

    # Average the loss over batches; accuracy over all samples seen.
    return running_loss / len(train_loader), 100. * n_correct / n_seen

def test():
    """Evaluate the model on test_loader.

    Returns a tuple (mean per-batch loss, accuracy in percent) and prints
    a one-line summary. Gradients are disabled throughout.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss_sum += criterion(output, target).item()
            n_seen += target.size(0)
            n_correct += (output.argmax(dim=1) == target).sum().item()

    avg_loss = loss_sum / len(test_loader)
    accuracy = 100. * n_correct / n_seen
    print(f'\nTest set: Average loss: {avg_loss:.4f}, '
          f'Accuracy: {n_correct}/{n_seen} ({accuracy:.2f}%)\n')
    return avg_loss, accuracy

# Train for a fixed number of epochs, recording per-epoch metrics so the
# curves can be plotted afterwards.
epochs = 10
train_losses, train_accs = [], []
test_losses, test_accs = [], []

for epoch in range(1, epochs + 1):
    epoch_train_loss, epoch_train_acc = train(epoch)
    epoch_test_loss, epoch_test_acc = test()

    train_losses.append(epoch_train_loss)
    train_accs.append(epoch_train_acc)
    test_losses.append(epoch_test_loss)
    test_accs.append(epoch_test_acc)

# Persist the learned weights for later reuse.
torch.save(model.state_dict(), 'mnist_model.pth')

# Visualize the training run: loss and accuracy curves side by side.
plt.figure(figsize=(12, 4))

# (subplot position, train series, test series, y-label, title,
#  train legend label, test legend label)
panels = [
    (1, train_losses, test_losses, 'Loss', 'Training and Test Loss',
     'Train Loss', 'Test Loss'),
    (2, train_accs, test_accs, 'Accuracy (%)', 'Training and Test Accuracy',
     'Train Accuracy', 'Test Accuracy'),
]
for pos, train_series, test_series, y_label, title, train_lbl, test_lbl in panels:
    plt.subplot(1, 2, pos)
    plt.plot(train_series, label=train_lbl)
    plt.plot(test_series, label=test_lbl)
    plt.xlabel('Epoch')
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend()
    plt.grid(True)

plt.tight_layout()
plt.savefig('training_metrics.png')
plt.show()

# Visualize predictions on a random sample of test images in a 5x5 grid;
# titles are green for correct predictions and red for mistakes.
plt.figure(figsize=(12, 12))
sample_size = 25
samples = random.sample(range(len(test_dataset)), sample_size)

# eval mode and the no-grad context are loop-invariant — hoist them out of
# the per-sample loop instead of re-entering them on every iteration.
model.eval()
with torch.no_grad():
    for i, idx in enumerate(samples):
        img, true_label = test_dataset[idx]
        output = model(img.to(device).unsqueeze(0))
        # keepdim is unnecessary when the single value is extracted with .item()
        pred = output.argmax(dim=1).item()

        plt.subplot(5, 5, i + 1)
        plt.imshow(img.squeeze().cpu().numpy(), cmap='gray')
        plt.title(f'Pred: {pred}, True: {true_label}',
                  color='green' if pred == true_label else 'red')
        plt.axis('off')

plt.tight_layout()
plt.savefig('predictions.png')
plt.show()

# Gather predictions over the full test set, then render a confusion matrix.
all_preds = []
all_labels = []

model.eval()
with torch.no_grad():
    for data, target in test_loader:
        batch_preds = model(data.to(device)).argmax(dim=1).cpu().numpy()
        all_preds.extend(batch_preds)
        all_labels.extend(target.numpy())

cm = confusion_matrix(all_labels, all_preds)

# Annotated heatmap: rows are true digits, columns are predicted digits.
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=range(10), yticklabels=range(10))
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.title('Confusion Matrix')
plt.savefig('confusion_matrix.png')
plt.show()

# Print a short summary of the finished run.
final_acc = test_accs[-1]
print(f"训练完成！模型在测试集上的准确率为: {final_acc:.2f}%")
print(f"训练轮数: {epochs}")
print(f"优化器: Adam")
print(f"学习率: 0.001")
print(f"隐藏层大小: 128")