import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from models.cnn import SimpleCNN
from utils import evaluate
import matplotlib.pyplot as plt
import numpy as np
import os
import json
from sklearn.metrics import confusion_matrix
import seaborn as sns
# Quick sanity check (uncomment to verify CUDA availability):
# import torch
# print(torch.cuda.is_available())

# Data loading: MNIST, normalized with its standard dataset-wide statistics.
transform = transforms.Compose([
    transforms.ToTensor(),
    # 0.1307 / 0.3081 are the conventional MNIST pixel mean / std.
    transforms.Normalize((0.1307,), (0.3081,))
])
train_set = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_set = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
test_loader = DataLoader(test_set, batch_size=1000, shuffle=False)

# Prefer GPU when available; model, loss and optimizer are shared by the
# training, evaluation and visualization sections below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SimpleCNN().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Load pre-trained weights if a checkpoint exists; otherwise train and save one.
weight_path = 'cnn_mnist.pth'
if os.path.exists(weight_path):
    model.load_state_dict(torch.load(weight_path, map_location=device))
    print(f"Loaded model weights from {weight_path}")
else:
    # Single source of truth for the epoch count: the plot x-axes below are
    # derived from it instead of hard-coding range(1, 6), so changing the
    # number of epochs can no longer desynchronize the curves.
    num_epochs = 5
    train_losses = []
    train_accuracies = []
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        correct = 0
        total = 0
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            pred = output.argmax(dim=1)
            correct += (pred == target).sum().item()
            total += target.size(0)
        avg_loss = total_loss / len(train_loader)
        acc = correct / total
        train_losses.append(avg_loss)
        train_accuracies.append(acc)
        print(f"Epoch {epoch+1}, Loss: {avg_loss:.4f}, Train Acc: {acc:.4f}")
    # Persist the trained weights so subsequent runs skip training.
    torch.save(model.state_dict(), weight_path)
    print(f"Model weights saved to {weight_path}")
    epochs_axis = range(1, num_epochs + 1)
    # Training loss curve.
    plt.figure()
    plt.plot(epochs_axis, train_losses, marker='o')
    plt.title('CNN Training Loss Curve')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.grid()
    plt.show()
    # Training accuracy curve.
    plt.figure()
    plt.plot(epochs_axis, train_accuracies, marker='o', color='orange')
    plt.title('CNN Training Accuracy Curve')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.grid()
    plt.show()

# Evaluate on the held-out test set.
model.eval()
y_true, y_pred = [], []
# Only the first 10 images are visualized later; the original retained the
# entire 10k-image test set here for no benefit, so cap retention instead.
all_imgs = []
with torch.no_grad():
    for data, target in test_loader:
        data = data.to(device)
        output = model(data)
        pred = output.argmax(dim=1).cpu().numpy()
        y_pred.extend(pred)
        y_true.extend(target.numpy())
        if len(all_imgs) < 10:
            all_imgs.extend(data[:10 - len(all_imgs)].cpu().numpy())
acc, pre, rec, f1 = evaluate(y_true, y_pred)
print(f"CNN Test Accuracy: {acc:.4f}, Precision: {pre:.4f}, Recall: {rec:.4f}, F1: {f1:.4f}")
# Save the CNN metrics to JSON so other experiments can compare against them.
with open('cnn_metrics.json', 'w') as f:
    json.dump([acc, pre, rec, f1], f)

# Visualize the per-class confusion matrix as an annotated heatmap.
conf_mat = confusion_matrix(y_true, y_pred)
plt.figure(figsize=(8, 6))
sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix of CNN on MNIST')
plt.show()

# Show ten test digits in a 2x5 grid, labelled with true vs. predicted class.
plt.figure(figsize=(10, 4))
for idx in range(10):
    plt.subplot(2, 5, idx + 1)
    # Undo the Normalize transform so pixel values are back in display range.
    restored = all_imgs[idx][0] * 0.3081 + 0.1307
    plt.imshow(restored, cmap='gray')
    plt.title(f"True: {y_true[idx]}\nPred: {y_pred[idx]}")
    plt.axis('off')
plt.tight_layout()
plt.show()

# Hyperparameter comparison experiment: learning rate x optimizer.
# The train/eval code that was duplicated inline is factored into helpers.

def _build_optimizer(opt_name, params, lr):
    """Return an SGD or Adam optimizer for `params` (dict dispatch, no if/elif)."""
    factories = {'SGD': torch.optim.SGD, 'Adam': torch.optim.Adam}
    return factories[opt_name](params, lr=lr)

def _quick_train(net, opt, epochs=2):
    """Train `net` on train_loader for a few epochs (speed over convergence)."""
    for _ in range(epochs):
        net.train()
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            opt.zero_grad()
            loss = criterion(net(data), target)
            loss.backward()
            opt.step()

def _test_accuracy(net):
    """Return top-1 accuracy of `net` on test_loader."""
    net.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for data, target in test_loader:
            pred = net(data.to(device)).argmax(dim=1)
            correct += (pred.cpu() == target).sum().item()
            total += target.size(0)
    return correct / total

learning_rates = [0.01, 0.001, 0.0001]
optimizers = ['SGD', 'Adam']
results = {}
for opt_name in optimizers:
    for lr in learning_rates:
        model = SimpleCNN().to(device)
        optimizer = _build_optimizer(opt_name, model.parameters(), lr)
        _quick_train(model, optimizer)  # only 2 epochs to keep the sweep fast
        results[f'{opt_name}_lr{lr}'] = _test_accuracy(model)

# Bar chart comparing the accuracy of each (optimizer, learning-rate) pair.
plt.figure()
plt.bar(results.keys(), results.values())
plt.ylabel('Accuracy')
plt.title('Effect of Learning Rate and Optimizer')
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()