import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np

# Seed every RNG we use so runs are reproducible.
np.random.seed(42)
torch.manual_seed(42)

# 定义LeNet模型
class LeNet(nn.Module):
    """LeNet-style CNN for 28x28 single-channel (MNIST) inputs.

    Two conv+pool stages followed by three fully connected layers;
    ``forward`` returns raw logits for the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: 1 -> 6 -> 16 channels, 5x5 kernels.
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        # Classifier head: 16*4*4 = 256 flattened features -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        # Parameter-free layers shared by both conv stages.
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Map a (batch, 1, 28, 28) batch to (batch, 10) class logits."""
        # (batch, 1, 28, 28) -> (batch, 6, 12, 12) -> (batch, 16, 4, 4)
        for conv in (self.conv1, self.conv2):
            x = self.pool(self.relu(conv(x)))
        # Collapse all non-batch dims: (batch, 256)
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)

# Input pipeline: convert images to tensors and normalize with the
# standard MNIST mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Download (if needed) and wrap the MNIST train/test splits.
train_dataset = datasets.MNIST(root='./data', train=True, download=True,
                               transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True,
                              transform=transform)

# Mini-batch loaders; only the training stream is shuffled.
batch_size = 64
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Initialize model, loss function, and optimizer.
# Pick the best available device instead of hard-coding 'mps', which
# raises at runtime on machines without Apple-silicon GPU support.
if torch.cuda.is_available():
    device = torch.device('cuda')
elif getattr(torch.backends, 'mps', None) is not None and torch.backends.mps.is_available():
    device = torch.device('mps')
else:
    device = torch.device('cpu')
model = LeNet().to(device)
criterion = nn.CrossEntropyLoss()  # cross-entropy loss for 10-way classification
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Adam optimizer

# 训练模型
def train(model, train_loader, criterion, optimizer, epochs=5):
    """Train `model` on `train_loader` and return per-epoch mean losses.

    Args:
        model: network to optimize; batches are moved to the device its
            parameters live on, so no module-level `device` is required.
        train_loader: iterable of (images, labels) mini-batches.
        criterion: loss function taking (outputs, labels).
        optimizer: optimizer constructed over `model.parameters()`.
        epochs: number of full passes over the training data.

    Returns:
        list[float]: average training loss for each epoch.
    """
    model.train()  # enable training mode (dropout/BN, if any)
    # Derive the target device from the model itself instead of relying
    # on a global `device` variable defined elsewhere in the file.
    device = next(model.parameters()).device
    train_losses = []

    for epoch in range(epochs):
        running_loss = 0.0
        for i, (images, labels) in enumerate(train_loader):
            images, labels = images.to(device), labels.to(device)

            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Backward pass and parameter update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # Progress report every 100 batches
            if (i + 1) % 100 == 0:
                print(f'Epoch [{epoch+1}/{epochs}], Step [{i+1}/{len(train_loader)}], Loss: {loss.item():.4f}')

        # Mean loss over this epoch
        epoch_loss = running_loss / len(train_loader)
        train_losses.append(epoch_loss)
        print(f'Epoch [{epoch+1}/{epochs}], Average Loss: {epoch_loss:.4f}')

    return train_losses

# 测试模型
def test(model, test_loader):
    model.eval()  # 设置为评估模式
    correct = 0
    total = 0
    
    with torch.no_grad():  # 不计算梯度
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    
    accuracy = 100 * correct / total
    print(f'Test Accuracy: {accuracy:.2f}%')
    return accuracy

# 可视化一些测试结果
def visualize_results(model, test_loader, num_samples=5):
    """Plot the first `num_samples` test images with true vs. predicted labels."""
    model.eval()
    # Use the device the model lives on rather than a module-level global.
    device = next(model.parameters()).device
    images, labels = next(iter(test_loader))
    images, labels = images[:num_samples].to(device), labels[:num_samples].to(device)

    with torch.no_grad():
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)

    # Move everything back to host memory for matplotlib
    images = images.cpu().numpy()
    labels = labels.cpu().numpy()
    predicted = predicted.cpu().numpy()

    plt.figure(figsize=(10, 4))
    for i in range(num_samples):
        plt.subplot(1, num_samples, i+1)
        plt.imshow(images[i].squeeze(), cmap='gray')
        plt.title(f'真实: {labels[i]}\n预测: {predicted[i]}')
        plt.axis('off')
    plt.tight_layout()
    plt.show()

# Train the model for 20 epochs (the original comment incorrectly said 5).
print(f"使用设备: {device}")
train_losses = train(model, train_loader, criterion, optimizer, epochs=20)

# Evaluate on the held-out test split
test_accuracy = test(model, test_loader)

# Plot the per-epoch training loss curve
plt.figure(figsize=(10, 5))
plt.plot(range(1, len(train_losses)+1), train_losses, marker='o')
plt.title('Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.grid(True)
plt.show()

# Show a few test predictions
visualize_results(model, test_loader)

# Save the trained weights
torch.save(model.state_dict(), 'lenet_mnist.pth')
print("模型已保存为 'lenet_mnist.pth'")

# Export to ONNX. Switch to eval mode first so the exported graph is the
# inference-time graph (a no-op for this model, but required if dropout
# or batch-norm layers are ever added).
model.eval()
torch.onnx.export(
    model.to('cpu'),
    torch.randn(1, 1, 28, 28),
    "lenet_mnist.onnx",
    export_params=True,
    opset_version=11,
    do_constant_folding=True,
    input_names=['input'],
    output_names=['output'],
)

# Convert the ONNX model to OpenVINO IR. Imported lazily so the training
# part of the script still runs without OpenVINO installed.
import openvino as ov

ov_model = ov.convert_model("lenet_mnist.onnx")
ov.save_model(ov_model, "lenet_mnist.xml")
print("OpenVINO模型已保存为 'lenet_mnist.xml'")
