"""
深度学习实验1：使用PyTorch实现MLP，并在MNIST数据集上验证
操作系统：Linux  Ubuntu 18.04.6 LTS
torch版本：torch 1.11.0
cuda版本：11.3
显卡：Nvidia RTX 2080Ti
IDE：PyCharm 2020.2.5 (Professional Edition)
"""
import datetime
import os

import torch
import torchvision
from sklearn.metrics import classification_report
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from torch.nn import functional
import matplotlib.pyplot as plt

# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# 搭建网络结构
# 共四层，除输入层外，其余为全连接层
# 建立MLP直接运用 torch 中的体系. 先定义所有的层属性(__init__()), 然后再一层层搭建(forward(x))层与层的关系链
class MLP(torch.nn.Module):  # inherit from torch's Module
    """Three-layer fully-connected classifier (two hidden layers + output).

    The forward pass returns raw logits. The training/eval code uses
    ``functional.cross_entropy``, which applies ``log_softmax`` internally,
    so applying ``softmax`` here as well (as the original did) squashes the
    gradients — a double-softmax bug. Argmax-based predictions are
    unaffected, since argmax of the logits equals argmax of their softmax.
    """

    def __init__(self, in_features, out_features):
        """
        Build the layers.

        :param in_features: total number of input features per sample (e.g. 28*28)
        :param out_features: number of output classes
        """
        super(MLP, self).__init__()  # call the parent constructor
        self.in_features = in_features
        self.out_features = out_features
        self.dense_1_out_features = 256  # hidden layer 1 width
        self.dense_2_out_features = 128  # hidden layer 2 width
        self.dense_1 = torch.nn.Linear(in_features=in_features, out_features=self.dense_1_out_features)  # hidden layer 1
        self.dense_2 = torch.nn.Linear(in_features=self.dense_1_out_features,
                                       out_features=self.dense_2_out_features)  # hidden layer 2
        self.out = torch.nn.Linear(in_features=self.dense_2_out_features, out_features=out_features)  # output layer

    def forward(self, x_in):
        """
        Forward pass.

        :param x_in: batch of images; flattened to (batch, in_features)
        :return: raw class logits of shape (batch, out_features)
        """
        x_in = x_in.view(-1, self.in_features)  # flatten each image into one row
        x = functional.relu(self.dense_1(x_in))  # ReLU activation
        x = functional.relu(self.dense_2(x))  # ReLU activation
        # No softmax here: cross_entropy expects unnormalized logits.
        return self.out(x)


def gpu_is_available():
    """
    Print torch/CUDA diagnostics: version, current device, availability,
    and the name of every visible GPU.

    :return: None
    """
    print(torch.__version__)
    # current_device() raises on CPU-only builds, so only call it when
    # CUDA is actually available (the original crashed without a GPU).
    if torch.cuda.is_available():
        # print the current device index
        print(torch.cuda.current_device())
    # is CUDA usable at all?
    print(torch.cuda.is_available())
    # device_count() is 0 without CUDA, so this loop is safe everywhere
    for i in range(torch.cuda.device_count()):
        # GPU names; device indices start at 0
        print('{} -> {}'.format(i, torch.cuda.get_device_name(i)))


def load_mnist(batch_size: int):
    """
    Download (if needed) and wrap the MNIST train/test sets in DataLoaders.

    :param batch_size: batch size for both loaders
    :return: (train_loader, test_loader)
    """
    # ToTensor scales pixels to [0, 1] and yields (C, H, W) float tensors.
    pre_transform = transforms.Compose([
        transforms.ToTensor()
    ])
    # download the datasets (a no-op when ./data already has them)
    train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=pre_transform, download=True)
    test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=pre_transform, download=True)
    # Shuffle only the training data; a fixed test order makes evaluation
    # runs reproducible (the original shuffled the test set too).
    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader


def train(model: torch.nn.Module, train_loader: DataLoader, lr: float, epochs: int, log_batch: int, save_path: str,
          verbose: int = 2):
    """
    Train the model with SGD + cross-entropy and save its parameters.

    :param model: a torch.nn.Module (possibly wrapped in DataParallel)
    :param train_loader: training data loader
    :param lr: learning rate
    :param epochs: number of epochs
    :param log_batch: roughly how many progress lines to print per epoch
    :param save_path: where to save the trained state_dict
    :param verbose: 0: no per-batch logs and no classification_report;
                    1: per-batch logs only; 2: everything
    :return: None
    """
    start_time = datetime.datetime.now()
    # Run on whatever device the model's parameters live on; the original
    # hard-coded .cuda() and crashed on CPU-only machines despite the
    # file-level CPU fallback.
    model_device = next(model.parameters()).device
    print('[{}] Start training with device <{}>'.format(start_time, model_device))
    opt = torch.optim.SGD(params=model.parameters(), lr=lr)  # plain SGD optimizer
    loss_function = functional.cross_entropy  # standard classification loss
    for epoch in range(epochs):
        epoch_start_time = datetime.datetime.now()
        train_loss = .0
        x_train_num = len(train_loader.dataset)
        batch_num = len(train_loader)
        # max(1, ...) prevents a ZeroDivisionError in the modulo below when
        # log_batch exceeds the number of batches.
        log_every = max(1, batch_num // log_batch)
        model.train()
        for index, (x_train, y_train) in enumerate(train_loader):
            x_train, y_train = x_train.to(model_device), y_train.to(model_device)
            opt.zero_grad()  # clear gradients from the previous step
            out = model(x_train)  # forward pass
            loss = loss_function(out, y_train, reduction='sum')  # per-batch summed loss
            loss.backward()  # backpropagate
            opt.step()  # apply the update
            if index % log_every == 0 and verbose != 0:  # progress log
                print('[{}] Training epoch: {} [{}/{} ({:.0f}%)] \tbatch_avg_loss: {:.6f}'.format(
                    datetime.datetime.now(),
                    epoch, index * x_train.size(0), x_train_num,
                    100. * index / batch_num, loss.item() / x_train.size(0)))
            # .item() detaches from the graph; accumulating the tensor
            # itself would keep every batch's graph alive in memory.
            train_loss += loss.item()
        train_loss = train_loss / x_train_num  # mean loss per sample
        epoch_end_time = datetime.datetime.now()
        # Evaluate accuracy on the training set (no held-out split here).
        total = 0
        acc = 0
        pred_total = []
        truth_total = []
        model.eval()
        with torch.no_grad():  # no gradients needed for evaluation
            for x_valid, y_valid in train_loader:
                x_valid, y_valid = x_valid.to(model_device), y_valid.to(model_device)
                out = model(x_valid)
                pred = torch.argmax(out.data, dim=-1).tolist()
                truth = y_valid.tolist()
                total += len(truth)
                acc += sum(1 for p, t in zip(pred, truth) if p == t)
                pred_total += pred
                truth_total += truth
        print('[{}] End epoch: {} \tepoch_avg_loss:{:.4f} \ttrain_accuracy:{:.2f} \tcosts {}'.format(epoch_end_time,
                                                                                                     epoch, train_loss,
                                                                                                     acc / total,
                                                                                                     epoch_end_time - epoch_start_time))
        if verbose == 2:
            print(classification_report(y_pred=pred_total, y_true=truth_total))
    end_time = datetime.datetime.now()
    print('[{}] End training, costs {}'.format(end_time, end_time - start_time))
    torch.save(model.state_dict(), save_path)  # persist the trained weights


def mlp_test(model: torch.nn.Module, test_loader):
    """
    Evaluate the model on the test set: print the per-sample average loss
    and a full classification report.

    :param model: trained torch.nn.Module
    :param test_loader: test data loader
    :return: None
    """
    test_loss = .0
    loss_function = functional.cross_entropy  # same loss as training
    pred = []
    truth = []
    # Use the device the model actually lives on; the original hard-coded
    # .cuda() and crashed on CPU-only machines.
    model_device = next(model.parameters()).device
    model.eval()
    with torch.no_grad():  # no gradient tracking during evaluation
        for x_test, y_test in test_loader:
            x_test, y_test = x_test.to(model_device), y_test.to(model_device)
            out = model(x_test)  # forward pass
            # .item() converts to a Python float instead of accumulating
            # tensors (the original summed tensors and printed one).
            test_loss += loss_function(out, y_test, reduction='sum').item()
            pred += torch.argmax(out.data, dim=-1).tolist()
            truth += y_test.tolist()
    test_loss /= len(test_loader.dataset)  # mean loss per sample
    print('Test_avg_loss: {:.4f}'.format(test_loss))
    print(classification_report(y_pred=pred, y_true=truth))


def main():
    """
    Entry point: prepare data loaders and the model, train, then evaluate.

    :return: None
    """
    saved_path = './saved_model/model.pth'
    # exist_ok=True replaces the race-prone exists()/makedirs() pair.
    os.makedirs(os.path.abspath(os.path.dirname(saved_path)), exist_ok=True)
    batch_size = 32
    lr = 1e-2
    epochs = 10
    log_batch = 5
    train_loader, test_loader = load_mnist(batch_size=batch_size)
    if torch.cuda.is_available():
        # DataParallel replicates the model across all visible GPUs.
        mlp = torch.nn.DataParallel(MLP(in_features=28 * 28, out_features=10).cuda(device=device))
    else:
        mlp = MLP(in_features=28 * 28, out_features=10)
    # train
    train(model=mlp, train_loader=train_loader, lr=lr, epochs=epochs, log_batch=log_batch, save_path=saved_path,
          verbose=1)
    # test: map_location guards against loading a GPU-saved checkpoint on CPU
    mlp.load_state_dict(torch.load(saved_path, map_location=device), strict=False)
    mlp_test(mlp, test_loader)


if __name__ == '__main__':
    # Print GPU diagnostics first, then run the full train/test pipeline.
    gpu_is_available()
    main()
