import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
import matplotlib.pyplot as plt

# Pick the best available accelerator: CUDA GPU, then Apple-silicon MPS, then CPU.
device = (
    "cuda"
    if torch.cuda.is_available()
    else "mps"  # fixed: "mps（苹果）" is not a valid torch device string
    if torch.backends.mps.is_available()
    else "cpu"
)
print(f"Using {device} device")

class Net(torch.nn.Module):
    """Four-layer fully connected classifier for flattened 28x28 MNIST images."""

    def __init__(self):
        super().__init__()
        # Three hidden layers of width 64, then a projection to the 10 digit classes.
        self.fc1 = torch.nn.Linear(28 * 28, 64)
        self.fc2 = torch.nn.Linear(64, 64)
        self.fc3 = torch.nn.Linear(64, 64)
        self.fc4 = torch.nn.Linear(64, 10)

    def forward(self, x):
        """Forward pass: x is a (batch, 784) tensor; returns log-probabilities over 10 classes."""
        hidden = self.fc1(x)
        for layer in (self.fc2, self.fc3):
            hidden = layer(torch.nn.functional.relu(hidden))
        logits = self.fc4(torch.nn.functional.relu(hidden))
        # log_softmax on the output layer pairs with nll_loss during training.
        return torch.nn.functional.log_softmax(logits, dim=1)


def get_data_loader(is_train):
    """Return a shuffled DataLoader over the MNIST train or test split.

    Downloads the dataset into ./data/ on first use.
    """
    pipeline = transforms.Compose([transforms.ToTensor()])
    dataset = MNIST("./data/", is_train, transform=pipeline, download=True)
    return DataLoader(dataset, batch_size=25, shuffle=True)

def evaluate(test_data, net):
    """Return the classification accuracy of `net` over `test_data`.

    `test_data` yields (x, y) batches; x is flattened to (batch, 784)
    before the forward pass. Returns 0.0 for an empty loader.
    """
    n_correct = 0
    n_total = 0
    with torch.no_grad():  # inference only: skip gradient bookkeeping
        for x, y in test_data:
            outputs = net(x.view(-1, 28 * 28))
            # Vectorized batch comparison instead of a per-sample Python loop.
            preds = torch.argmax(outputs, dim=1)
            n_correct += (preds == y).sum().item()
            n_total += y.size(0)
    # Guard against an empty loader rather than dividing by zero.
    return n_correct / n_total if n_total else 0.0


def main():
    """Train the network for two epochs on MNIST and display sample predictions."""
    train_data = get_data_loader(is_train=True)
    test_data = get_data_loader(is_train=False)
    net = Net()

    cuda_ok = torch.cuda.is_available()
    print(torch.__version__)
    print('显卡是否可用:', '可用' if cuda_ok else '不可用')
    # Only query CUDA device properties when a GPU is actually present:
    # the original unconditional calls raise on CPU-only machines.
    if cuda_ok:
        print('当前显卡的CUDA算力:', torch.cuda.get_device_capability())
    print("initial accuracy:", evaluate(test_data, net))
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)  # Adam optimizer
    for epoch in range(2):
        for x, y in train_data:
            optimizer.zero_grad()  # clear gradients accumulated by the previous step
            output = net(x.view(-1, 28 * 28))
            # nll_loss pairs with the log_softmax output layer of Net.
            loss = torch.nn.functional.nll_loss(output, y)
            loss.backward()
            optimizer.step()
        print("epoch", epoch, "accuracy:", evaluate(test_data, net))
        if cuda_ok:
            # Report GPU memory usage as a percentage of total device memory.
            print('当前显卡的显存使用率:',
                  torch.cuda.memory_allocated(0) / torch.cuda.get_device_properties(0).total_memory * 100, '%')

    # Show a few test images alongside the model's predictions.
    for n, (x, _) in enumerate(test_data):
        if n > 5:
            break
        predict = torch.argmax(net(x[0].view(-1, 28 * 28)))
        plt.figure(n)
        plt.imshow(x[0].view(28, 28))
        plt.title("prediction: " + str(int(predict)))
    plt.show()


if __name__ == "__main__":
    main()
