import torch
import torchvision
from torch import nn
from torch.utils import data
from torchvision import transforms

# 记录
from torch.utils.tensorboard import SummaryWriter
import logging


LOG_FORMAT = "%(asctime)s++++++%(message)s"

# Select the compute device: first CUDA GPU when available, CPU otherwise.
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
print("///////", device)

# Output destinations: TensorBoard scalar directory and the text log file.
# NOTE(review): ./plot and ./log must already exist — logging.basicConfig
# raises FileNotFoundError if the log directory is missing; confirm they
# are created before this script runs.
ScaleName = './plot/test'
LogName = './log/test.log'
logging.basicConfig(filename=LogName, level=logging.DEBUG, format=LOG_FORMAT)


# mnist数据集，放在迭代器中，小批量
# Build the MNIST train/test DataLoaders (downloads the dataset on first run).
def load_data_mnist(batch_size, num_workers=0):
    """Return (train_loader, test_loader) over MNIST as tensors.

    Args:
        batch_size: mini-batch size used by both loaders.
        num_workers: data-loading subprocesses (0 = load in the main
            process); new parameter with a backward-compatible default.

    Returns:
        Tuple of DataLoaders; the training loader reshuffles every epoch,
        the test loader keeps a fixed order.
    """
    # ToTensor converts PIL images to float tensors in [0, 1].
    trans = transforms.Compose([transforms.ToTensor()])
    # Training split (download=True fetches the data if it is missing).
    mnist_train = torchvision.datasets.MNIST(
        root="./data", train=True, transform=trans, download=True)
    # Test split.
    mnist_test = torchvision.datasets.MNIST(
        root="./data", train=False, transform=trans, download=True)
    print('训练集长度：', len(mnist_train))
    print('图片形状：', mnist_train[0][0].shape)
    # Fixed typo in the printed message: 'lable' -> 'label'.
    print('label:', mnist_train[0][1])
    return (data.DataLoader(mnist_train, batch_size, shuffle=True,
                            num_workers=num_workers),
            data.DataLoader(mnist_test, batch_size, shuffle=False,
                            num_workers=num_workers))


# Mini-batch size shared by the train and test loaders.
batch_size = 256
train_loader, test_loader = load_data_mnist(batch_size=batch_size)


# 构建网络，查看每一层的情况
# LeNet-5-style CNN for 1x28x28 MNIST digits, 10 output classes.
class lenet(nn.Module):
    """Classic LeNet with sigmoid activations; logits out, no softmax."""

    def __init__(self):
        super().__init__()
        # Keep the attribute name `dnn` so saved state_dict keys stay stable.
        layers = [
            nn.Conv2d(1, 6, 5, padding=2), nn.Sigmoid(),   # 1x28x28 -> 6x28x28
            nn.MaxPool2d(2, 2),                            # -> 6x14x14
            nn.Conv2d(6, 16, 5), nn.Sigmoid(),             # -> 16x10x10
            nn.MaxPool2d(2, 2),                            # -> 16x5x5
            nn.Flatten(),                                  # -> 400
            nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
            nn.Linear(120, 84), nn.Sigmoid(),
            nn.Linear(84, 10),
        ]
        self.dnn = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        return self.dnn(x)


# 每一个epoch的训练函数，epoch表示模型在整个训练数据集上完整训练一次的次数
# One full pass over the training set.
def train(epoch, model):
    """Train `model` for one epoch over the module-level `train_loader`.

    Args:
        epoch: current epoch index (used only in the progress printout).
        model: the network to optimize; assumed already moved to `device`.

    Returns:
        (train_acc, train_loss) as plain Python floats: accuracy over the
        whole training set and the mean per-batch loss.
    """
    total_loss = 0.0    # running sum of per-batch losses
    correct = 0         # correctly classified samples this epoch
    # Switch to training mode (affects dropout/batch-norm style layers).
    model.train()
    for feature, label in train_loader:
        # Move the batch to the training device (GPU when available).
        feature = feature.to(device)
        label = label.to(device)
        # Clear gradients left over from the previous step.
        optimizer.zero_grad()
        preds = model(feature)
        loss = criterion(preds, label)
        # Backpropagate and update the weights.
        loss.backward()
        optimizer.step()
        # .item() converts the 0-d tensors to Python numbers so we don't
        # accumulate device tensors across the whole epoch.
        correct += (preds.argmax(1) == label).sum().item()
        total_loss += loss.item()

    # Divide once after the loop instead of once per batch.
    train_loss = total_loss / len(train_loader)
    train_acc = correct / len(train_loader.dataset)
    print(f'Epoch:{epoch:3} | Train Loss:{train_loss:6.4f} | Train Acc:{train_acc:6.4f}')
    return (float(train_acc), float(train_loss))


# 每一个epoch的测试函数
# Evaluate on the test set after each training epoch.
def test(model):
    """Evaluate `model` on the module-level `test_loader`.

    Args:
        model: the network to evaluate; assumed already moved to `device`.

    Returns:
        Test-set accuracy as a plain Python float.
    """
    correct = 0  # correctly classified samples
    # Switch to inference mode (affects dropout/batch-norm style layers).
    model.eval()
    # No autograd bookkeeping needed during evaluation.
    with torch.no_grad():
        for feature, label in test_loader:
            feature = feature.to(device)
            label = label.to(device)
            preds = model(feature)
            # .item(): accumulate a Python int, not a device tensor.
            correct += (preds.argmax(1) == label).sum().item()
    test_acc = correct / len(test_loader.dataset)
    print(f'Test Acc:{test_acc:6.4f}')
    return float(test_acc)


# 训练主函数 输入网络和epochs
# Main training driver: run `epochs` epochs, log scalars to TensorBoard,
# and checkpoint the best-performing weights.
def Train(epochs, net, name):
    """Train `net` for `epochs` epochs, saving the best checkpoint.

    Args:
        epochs: number of full passes over the training set.
        net: model to train; assumed already moved to `device`.
        name: tag used for the TensorBoard scalar group, the checkpoint
            filename and the log messages.
    """
    best_acc = 0  # renamed from `max`, which shadowed the builtin
    writer = SummaryWriter(ScaleName)
    logging.debug("start training name:" + name + "--")
    print("///start training name:"+name+"///")
    for epoch in range(epochs):
        # Train one epoch, then measure test accuracy.
        train_acc, train_loss = train(epoch, net)
        test_acc = test(net)
        # One scalar group per run so the three curves share a chart.
        writer.add_scalars(name, {'test_acc': test_acc, 'train_acc': train_acc, 'train_loss': train_loss}, epoch)
        # Checkpoint whenever the test accuracy improves.
        if best_acc < test_acc:
            best_acc = test_acc
            print("max:", best_acc, "epoch:", epoch)
            # NOTE(review): assumes ./model exists — torch.save does not
            # create the directory; confirm it is created upfront.
            torch.save(net.state_dict(), "model/net_{}.pth".format(name))
            logging.debug("max:"+str(best_acc)+"epoch:"+str(epoch))
    writer.close()
    logging.debug("///训练完成///")


# 开始训练
# --- Script entry: build the model, loss and optimizer, then train. ---
# nn.Module.to() moves parameters in place and returns the module itself,
# so construction and device placement can be chained.
net = lenet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
Train(20, net, "L1")
