import torch
import torch.nn.functional as F
from A5_implement_multilayer_neural_networks import NeuralNetwork
from A6_setup_efficient_data_loader import train_loader, x_train, y_train, test_loader

# Fix the RNG seed so weight initialization (and thus training) is reproducible.
torch.manual_seed(123)

# Neural network model: 2 input features, 2 output classes.
model = NeuralNetwork(num_inputs=2, num_outputs=2)

# The optimizer needs to know which parameters it is responsible for updating.
# NOTE(review): lr=0.5 is unusually large for SGD — fine for this toy dataset,
# confirm before reusing on real data.
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)

# Three full passes over the training set.
num_epochs = 3

for epoch in range(num_epochs):
    # Put the model into training mode before each epoch.
    # The core training cycle is: compute loss -> backpropagate gradients ->
    # update parameters, so the logits move ever closer to the true labels.
    model.train()

    for step, (inputs, targets) in enumerate(train_loader):
        print("batch_idx: ", step)
        print("features: ", inputs)
        print("labels: ", targets)

        # Clear gradients left over from the previous batch so backward()
        # accumulates only this batch's gradients.
        optimizer.zero_grad()

        batch_logits = model(inputs)
        print("logits: ", batch_logits)

        # Cross-entropy between raw logits and integer class labels.
        batch_loss = F.cross_entropy(batch_logits, targets)
        batch_loss.backward()  # compute gradients
        optimizer.step()       # apply the parameter update

        ### logging
        print(
            f"Epoch: {epoch+1:03d}/{num_epochs:03d}"
            f" | Batch {step:03d}/{len(train_loader):03d}"
            f" | Train Loss: {batch_loss:.2f}"
        )

    print("==========================")
    print("model evaluate mode")
    print("==========================")
    # After the epoch, switch to evaluation mode and score the model on the
    # full training set without tracking gradients.
    model.eval()
    with torch.no_grad():
        train_logits = model(x_train)
    print("outputs: ", train_logits)
    torch.set_printoptions(sci_mode=False)
    class_probs = torch.softmax(train_logits, dim=1)
    print("probas: ", class_probs)

    class_preds = torch.argmax(class_probs, dim=1)
    print("predictions: ", class_preds)
    print(class_preds == y_train)
    print("correct num: ", torch.sum(class_preds == y_train))
    print("--------------")

"""
Epoch: 001/003 | Batch 000/002 | Train Loss: 0.75
Epoch: 001/003 | Batch 001/002 | Train Loss: 0.65
Epoch: 002/003 | Batch 000/002 | Train Loss: 0.44
Epoch: 002/003 | Batch 001/002 | Train Loss: 0.13
Epoch: 003/003 | Batch 000/002 | Train Loss: 0.03
Epoch: 003/003 | Batch 001/002 | Train Loss: 0.00
"""

def compute_accuracy(model, dataloader):
    """Return the fraction of correctly classified examples in *dataloader*.

    Args:
        model: a module/callable mapping a feature batch to logits of shape
            (batch_size, num_classes).
        dataloader: iterable yielding (features, labels) batches, where
            labels is a 1-D tensor of integer class indices.

    Returns:
        float in [0, 1]: correct predictions / total examples
        (0.0 for an empty dataloader).
    """
    model = model.eval()
    correct = 0          # int accumulator (the original float + tensor mix is avoided)
    total_examples = 0

    # No gradients are needed for a metric; wrapping the whole loop avoids
    # building an autograd graph for every batch.
    with torch.no_grad():
        for features, labels in dataloader:
            logits = model(features)
            # argmax over dim=1 picks the highest-scoring class per row.
            predictions = torch.argmax(logits, dim=1)
            correct += (predictions == labels).sum().item()
            total_examples += labels.shape[0]

    # Guard: an empty dataloader previously raised ZeroDivisionError.
    if total_examples == 0:
        return 0.0
    return correct / total_examples

# Report accuracy on both splits using the shared metric helper.
train_accuracy = compute_accuracy(model, train_loader)
print(train_accuracy)
test_accuracy = compute_accuracy(model, test_loader)
print(test_accuracy)

# state_dict is a Python dict that maps each layer in the model to its
# trainable parameters (weights and biases).
print(model.state_dict())

# "model.pth" is an arbitrary file name; .pth and .pt are the most common
# file extensions for PyTorch models.
torch.save(model.state_dict(), "model.pth")
