import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torchsummary import summary
from Classifier import Classifier
import matplotlib.pyplot as plt

# Download MNIST (if needed) and convert images to tensors for both splits.
_to_tensor = torchvision.transforms.ToTensor()
train_dataset = torchvision.datasets.MNIST(
    root='./data', train=True, download=True, transform=_to_tensor)
test_dataset = torchvision.datasets.MNIST(
    root='./data', train=False, download=True, transform=_to_tensor)

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the classifier, move it to the chosen device, and print a
# layer-by-layer summary for a single 1x28x28 input.
model = Classifier()
model.to(device)
summary(model, (1, 28, 28))

# Adam with a small learning rate updates the model parameters during
# training; cross-entropy is the standard criterion for multi-class
# classification (negative log-probability of the true class).
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss()

# Training hyper-parameters: samples per forward/backward pass, and the
# number of full passes over the training data.
batch_size = 100
num_epochs = 10

# Hold out 20% of the training images as a validation set.
val_percent = 0.2
val_size = int(val_percent * len(train_dataset))
train_size = len(train_dataset) - val_size
train_dataset, val_dataset = torch.utils.data.random_split(
    train_dataset, [train_size, val_size])

# Mini-batch loaders; pin_memory speeds up host-to-GPU copies.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False, pin_memory=True)

# Per-epoch history, plotted after training.
losses, accuracies = [], []
val_losses, val_accuracies = [], []
# Train the model, recording per-epoch training/validation loss and accuracy.
for epoch in range(num_epochs):
    # --- Training phase ---
    model.train()  # enable dropout/batch-norm training behaviour, if any
    running_loss = 0.0
    running_correct = 0
    running_total = 0
    for images, labels in train_loader:
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and optimisation: clear stale gradients, compute
        # gradients of the loss w.r.t. the parameters, then update them.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate statistics over EVERY batch; the original code kept
        # only the final batch of the epoch, making the curves noisy.
        running_loss += loss.item() * labels.size(0)
        _, predicted = torch.max(outputs.data, 1)
        running_correct += (predicted == labels).sum().item()
        running_total += labels.size(0)

    acc = running_correct / running_total
    epoch_loss = running_loss / running_total
    accuracies.append(acc)
    losses.append(epoch_loss)

    # --- Validation phase ---
    model.eval()  # evaluation behaviour for dropout/batch-norm, if any
    val_running_loss = 0.0
    val_correct = 0
    val_total = 0
    with torch.no_grad():
        for images, labels in val_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Original code computed correct/total outside this loop, so
            # validation accuracy reflected only the last batch.
            val_running_loss += loss.item() * labels.size(0)
            _, predicted = torch.max(outputs.data, 1)
            val_correct += (predicted == labels).sum().item()
            val_total += labels.size(0)

    val_acc = val_correct / val_total
    val_loss = val_running_loss / val_total
    # Bug fix: the original appended the *training* accuracy (`acc`) here,
    # so the validation-accuracy plot duplicated the training curve.
    val_accuracies.append(val_acc)
    val_losses.append(val_loss)

    print('Epoch [{}/{}],Loss:{:.4f},Validation Loss:{:.4f},Accuracy:{:.2f},Validation Accuracy:{:.2f}'.format(
        epoch + 1, num_epochs, epoch_loss, val_loss, acc, val_acc))

# After training, plot the training and validation loss curves side by
# side; diverging curves indicate over- or under-fitting on unseen data.
epoch_axis = range(num_epochs)
plt.plot(epoch_axis, losses,
         color='red', marker='o', label='Training  Loss')
plt.plot(epoch_axis, val_losses,
         color='blue', linestyle='--', marker='x', label='Validation  Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training  and  Validation  Loss')
plt.legend()
plt.show()

# Plot the training and validation accuracy over time.
epoch_axis = range(num_epochs)
plt.plot(epoch_axis, accuracies,
         color='red', marker='o', label='Training  Accuracy')
plt.plot(epoch_axis, val_accuracies,
         color='blue', linestyle=':', marker='x', label='Validation  Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training  and  Validation  Accuracy')
plt.legend()
plt.show()

# Create a DataLoader for the test dataset (no shuffling needed for eval).
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Evaluate the trained model on the held-out test set.
model.eval()  # evaluation behaviour for dropout/batch-norm, if any

with torch.no_grad():
    correct = 0
    total = 0
    y_true = []
    y_pred = []
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        # Collect plain Python ints. The original extended with 0-dim
        # tensors, which wastes memory and confuses downstream tooling
        # such as sklearn's classification_report.
        y_true.extend(labels.cpu().tolist())
        y_pred.extend(predicted.cpu().tolist())

print('Test  Accuracy:  {}%'.format(100 * correct / total))

# Optionally generate a per-class report (requires scikit-learn):
# from sklearn.metrics import classification_report
# print(classification_report(y_true, y_pred))
