import argparse
import sys

import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from tqdm import tqdm
from model import Net


# 设置训练参数
# Build and parse the command-line training arguments.
def set_args(argv=None):
    """Parse training hyper-parameters from the command line.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` (original behavior).
            Passing an explicit list makes the function testable and safe to
            call from an importing module.

    Returns:
        argparse.Namespace with ``epoch`` (int, default 10) and
        ``batch_size`` (int, default 32).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=32)

    ret_args = parser.parse_args(argv)
    return ret_args


def train(e):
    """Run one training epoch over the module-level ``train_loader``.

    Uses the module globals ``net``, ``device``, ``optimizer`` and
    ``criterion``; updates weights in place and shows running accuracy
    on a tqdm progress bar.

    Args:
        e: epoch index, used only to label the progress bar.
    """
    running_loss = 0
    hits = 0
    seen = 0

    # Put the model in training mode (enables dropout / batch-norm updates).
    net.train()
    progress = tqdm(train_loader, unit='batch')
    progress.set_description_str("epoch {} -    train".format(e))
    for images, labels in progress:
        images, labels = images.to(device), labels.to(device)

        # Clear gradients accumulated from the previous batch.
        optimizer.zero_grad()
        # Forward pass.
        outputs = net(images)
        # Running accuracy from the argmax prediction per sample.
        _, preds = torch.max(outputs.data, dim=1)
        hits += torch.eq(preds, labels).sum().item()
        # Loss, backward pass, and weight update.
        loss = criterion(outputs, labels)
        running_loss += loss.item()
        loss.backward()
        optimizer.step()
        seen += labels.size(0)

        acc = 100 * hits / seen
        progress.set_postfix_str("Acc: {} %".format(acc))


def validate(e):
    """Evaluate the model on the module-level ``test_loader`` for one epoch.

    Uses the module globals ``net``, ``device`` and ``criterion``; reports
    running accuracy on a tqdm progress bar. No gradients are computed and
    no weights are updated.

    Args:
        e: epoch index, used only to label the progress bar.
    """
    total_loss = 0
    total_correct = 0
    total_data = 0

    # Bug fix: switch to eval mode so dropout/batch-norm behave
    # deterministically during validation. train() re-enables training
    # mode at the start of the next epoch.
    net.eval()
    with torch.no_grad():
        bar = tqdm(test_loader, unit='batch')
        bar.set_description_str("epoch {} - validate".format(e))
        for data in bar:
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)

            # Forward pass only.
            outputs = net(images)
            _, predicted = torch.max(outputs.data, dim=1)
            total_correct += torch.eq(predicted, labels).sum().item()
            # Accumulate loss for monitoring.
            loss = criterion(outputs, labels)
            total_loss += loss.item()

            total_data += labels.size(0)

            acc = 100 * total_correct / total_data
            bar.set_postfix_str("Acc: {} %".format(acc))


if '__main__' == __name__:
    # Parse training hyper-parameters.
    args = set_args()

    # Tensor conversion + per-channel normalization (MNIST mean/std).
    trans = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(0.1307, 0.3081)])
    # Load the MNIST dataset; download automatically if missing.
    # Training split.
    train_set = datasets.MNIST(root='../data/dataset/minst', train=True,
                               download=True, transform=trans)
    # Test split.
    test_set = datasets.MNIST(root='../data/dataset/minst', train=False,
                              download=True, transform=trans)
    # Data loaders: shuffle only the training data.
    train_loader = DataLoader(dataset=train_set, batch_size=args.batch_size, shuffle=True)
    test_loader = DataLoader(dataset=test_set, batch_size=args.batch_size, shuffle=False)

    print('数据集已加载')

    net = Net()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)

    print('模型创建成功')
    sys.stdout.flush()

    # Loss function and optimizer.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.00001)
    # Exponentially decaying learning rate (lr *= 0.9 per epoch).
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    nr_epoch = args.epoch
    for epoch in range(nr_epoch):
        train(epoch)
        validate(epoch)
        # Bug fix: the scheduler was created but never stepped, so the
        # learning rate never decayed. Step once per epoch, after training.
        scheduler.step()
