import copy
import time

import torch
import torch.nn as nn
import torch.optim as optim

from data_loader import *

# Visualization reference: https://zhuanlan.zhihu.com/p/561308276

class Network(nn.Module):
    """Small CNN classifier for 28x28 single-channel images (e.g. MNIST).

    Architecture: conv(1->10, 5x5) -> ReLU -> 2x2 max-pool ->
    conv(10->20, 3x3) -> ReLU -> flatten (20*10*10 = 2000) ->
    fc(2000->500) -> ReLU -> fc(500->10).

    forward() returns raw logits. It deliberately does NOT apply
    log_softmax: this file trains with nn.CrossEntropyLoss, which applies
    log_softmax internally, so doing it in the model as well would apply
    it twice. argmax-based prediction is unaffected by this change
    because log_softmax is monotonic.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)   # 28x28 -> 24x24
        self.conv2 = nn.Conv2d(10, 20, 3)  # 12x12 -> 10x10
        # Two fully connected layers; 2000 = 20 channels * 10 * 10.
        self.fc1 = nn.Linear(2000, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # Accept flat (batch, 784) input or already image-shaped input.
        out = x.view(-1, 1, 28, 28)  # (batch, channels, height, width)
        out = self.conv1(out)
        out = torch.relu(out)
        out = torch.max_pool2d(out, 2, 2)  # 24x24 -> 12x12
        out = self.conv2(out)
        out = torch.relu(out)
        out = out.view(x.size(0), -1)  # flatten for the fully connected layers
        out = self.fc1(out)
        out = torch.relu(out)
        # Raw logits; CrossEntropyLoss applies log_softmax itself.
        return self.fc2(out)


if __name__ == '__main__':
    start = time.time()

    train_data, validation_data, test_data = load_data_wrapper()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    network = Network().to(device)

    criterion = nn.CrossEntropyLoss()  # cross-entropy loss
    # NOTE: only with SGD or AdamW optimizers is "weight_decay"
    # equivalent to L2 regularization.
    optimizer = optim.AdamW(network.parameters(), lr=0.001, weight_decay=0.001)
    num_epochs = 10              # maximum number of training epochs
    num_stop = 5                 # early-stop after this many epochs without improvement
    best_val = 0                 # best validation accuracy seen so far
    num_without_improvement = 0  # epochs since the last improvement
    best_model_para = None       # snapshot of the best parameters on the validation set

    for epoch in range(num_epochs):
        # Training mode: backprop and parameter updates are active.
        network.train()
        epoch_loss = 0
        for x, y in train_data:
            x, y = x.to(device), y.to(device)
            pre_y = network(x)
            loss = criterion(pre_y, y)
            optimizer.zero_grad()
            loss.backward()
            epoch_loss += loss.item()
            optimizer.step()

        # Evaluation mode; no_grad() additionally disables gradient
        # tracking, so no graph is built during validation.
        network.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for x, y in validation_data:
                x, y = x.to(device), y.to(device)
                output = network(x)
                _, predicted = torch.max(output, dim=1)
                total += y.size(0)
                # .item() converts the 0-dim tensor to a plain Python int.
                correct += (predicted == y).sum().item()
        validation_accuracy = 100 * correct / total
        print(f'迭代次数： {epoch + 1}/{num_epochs},训练损失值：{epoch_loss}, 验证精度: {validation_accuracy:.2f}%')

        if validation_accuracy > best_val:
            num_without_improvement = 0
            best_val = validation_accuracy
            # Deep-copy the snapshot: state_dict() returns references to
            # the live parameter tensors, which later training steps
            # mutate in place — without the copy, "best" would silently
            # track the latest (possibly worse) weights.
            best_model_para = copy.deepcopy(network.state_dict())
        else:
            num_without_improvement += 1
            if num_without_improvement == num_stop:
                print("精度没有提升，停止训练 ", epoch)
                break

    # Restore the best parameters (if any epoch improved) before
    # saving and before evaluating on the test set.
    if best_model_para is not None:
        network.load_state_dict(best_model_para)
    # Save the model *parameters* (state_dict) — the recommended,
    # portable format — rather than pickling the whole model object.
    torch.save(network.state_dict(), 'cnn.pth')

    correct = 0
    total = 0
    network.eval()
    with torch.no_grad():
        for x, y in test_data:
            x, y = x.to(device), y.to(device)
            output = network(x)
            _, predicted = torch.max(output, dim=1)
            total += y.size(0)
            correct += (predicted == y).sum().item()
    test_accuracy = 100 * correct / total
    print(f'测试精度: {test_accuracy:.2f}%')

    end = time.time()
    print('预训练模型花费时间为：%.2f秒' % (end - start))
