import numpy as np
from nn import nn
from Way import *
from Var import Var
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Download MNIST via torchvision and convert it to plain numpy arrays.
transform = transforms.Compose([
    transforms.ToTensor(),
    # Dataset-wide mean and std of MNIST pixel intensities.
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Training split (downloaded into ./data on first run).
train_dataset = datasets.MNIST(
    root='./data', train=True, download=True, transform=transform
)

# Test split.
test_dataset = datasets.MNIST(
    root='./data', train=False, download=True, transform=transform
)

# Single pass over each split: unzip (tensor, label) pairs into two lists.
train_images, train_labels = map(
    list, zip(*((img.numpy(), label) for img, label in train_dataset))
)
test_images, test_labels = map(
    list, zip(*((img.numpy(), label) for img, label in test_dataset))
)

# Stack into (N, 1, 28, 28) float32 arrays for the convolutional layers.
train_images = np.array(train_images).reshape((60000, 1, 28, 28)).astype('float32')
test_images = np.array(test_images).reshape((10000, 1, 28, 28)).astype('float32')

# Convert integer labels to one-hot encoding.
def to_categorical(y, num_classes=10):
    """Return one-hot rows for integer labels *y* (picks rows of an identity matrix)."""
    identity = np.eye(num_classes)
    return identity[y]


# One-hot encode both label sets so they match the 10-way softmax output.
train_labels, test_labels = to_categorical(train_labels), to_categorical(test_labels)


# LeNet model definition.
class LeNet(nn.Module):
    """LeNet-5-style CNN for 28x28 single-channel MNIST digits, built on the custom nn framework."""

    def __init__(self):
        super().__init__()
        # Two conv + 2x2 max-pool stages: 1 -> 6 -> 16 feature maps.
        self.conv1 = nn.conv2d(kernel_h=5, kernel_w=5, out_channels=6, in_channels=1)
        self.pool1 = nn.Pool(kernel_h=2, kernel_w=2, stride=2)
        self.conv2 = nn.conv2d(kernel_h=5, kernel_w=5, out_channels=16, in_channels=6)
        self.pool2 = nn.Pool(kernel_h=2, kernel_w=2, stride=2)
        # Classifier head: flattened 16*4*4 features -> 120 -> 84 -> 10 logits.
        self.fc1 = nn.linear(in_features=16 * 4 * 4, out_features=120)
        self.fc2 = nn.linear(in_features=120, out_features=84)
        self.fc3 = nn.linear(in_features=84, out_features=10)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return raw class logits for a batch x; assumes x is (N, 1, 28, 28) — TODO confirm."""
        out = self.pool1(self.relu(self.conv1(x)))
        out = self.pool2(self.relu(self.conv2(out)))
        # Flatten every sample's feature maps before the fully connected head.
        out = out.reshape((out.data.shape[0], -1))
        out = self.relu(self.fc1(out))
        out = self.relu(self.fc2(out))
        return self.fc3(out)


# Model training setup.
model = LeNet()

# NOTE(review): the file uses `from Way import *` (L3), so `Way.SGD` only
# resolves if the Way module itself exports a name `Way` — confirm; otherwise
# this should be plain `SGD(...)`.
optimizer = Way.SGD(model.get_params(), lr=0.01)

# Hyperparameters: 5 passes over the training set, 60 samples per mini-batch.
num_epochs = 5
batch_size = 60


for epoch in range(num_epochs):
    count = 0
    for x, y in data_iter(batch_size, train_images, train_labels):
        inputs = Var(x)
        labels = Var(y)

        # 1. Forward pass: model emits raw logits.
        outputs = model(inputs)
        # 2. Softmax turns logits into class probabilities.
        outputs_softmax = outputs.softmax(dim=1, keep_axis=True)
        # 3. Cross-entropy loss against the one-hot targets.
        loss = outputs_softmax.criterion(labels)

        optimizer.zero_grad()  # clear accumulated gradients
        loss.backward()        # backpropagate
        optimizer.step()       # update parameters

        count += 1
        # BUG FIX: batch_size is an int, so len(batch_size) raised TypeError.
        # Divide by batch_size directly to report the per-sample average loss.
        print(count, loss.data / batch_size)
    # Epoch summary uses the loss of the last mini-batch (same fix as above).
    print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {loss.data / batch_size}')
    save_model(model, 'LeNet.txt')  # checkpoint after every epoch

# Evaluation: softmax the logits, then pick the most probable class per sample.
test_inputs = Var(test_images)
test_outputs = model(test_inputs)
test_outputs_softmax = test_outputs.softmax(dim=1)
# Predicted vs. true class indices (one-hot labels collapsed back to ints).
test_preds = np.argmax(test_outputs_softmax.data, axis=1)
test_labels = np.argmax(test_labels, axis=1)
correct = test_preds == test_labels
accuracy = correct.mean()
print(f'Test Accuracy: {accuracy * 100:.2f}%')

