"""
Deep learning experiment 1: a Transformer (ViT-style) classifier implemented in
PyTorch and validated on the (Fashion)MNIST dataset.
OS: Linux Ubuntu 18.04.6 LTS
torch version: torch 1.11.0
CUDA version: 11.3
GPU: Nvidia RTX 2080Ti
IDE: PyCharm 2020.2.5 (Professional Edition)
"""
import argparse
import datetime
import os

import torch
import torchvision
from sklearn.metrics import classification_report
from torch import Tensor
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from torch.nn import functional
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
import torch.nn.functional as F

from torch.nn.parallel import DistributedDataParallel as DDP
from torch import distributed as dist

# Distributed initialization: `torch.distributed.launch` passes --local_rank to
# every spawned process; bind this process to its GPU and join the NCCL group.

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, help="local gpu id")
args = parser.parse_args()
dist.init_process_group(backend='nccl', init_method='env://')
torch.cuda.set_device(args.local_rank)
# Global rank across all processes; rank 0 does the logging and model saving.
global_rank = dist.get_rank()

# In[1] We need a plain linear layer to project the patches.
# A PatchEmbedding class keeps the code tidy.
'''
Note: after inspecting the original implementation, the authors use a Conv2d
layer instead of a linear layer for better performance. This is obtained by
setting both kernel_size and stride equal to `patch_size`; intuitively the
convolution is applied to each patch independently.
Hence we first apply the conv layer and then flatten the resulting feature map.
'''


class PatchEmbedding(torch.nn.Module):
    """Split an image into non-overlapping patches, embed each patch, and
    prepend a learnable CLS token.

    Following the original ViT implementation, the per-patch linear projection
    is realized as a Conv2d whose kernel_size and stride both equal patch_size,
    so the projection is applied to each patch independently.
    """

    def __init__(self, in_channels: int = 3, patch_size: int = 16, emb_size: int = 768, img_size: int = 224):
        """
        :param in_channels: number of input image channels
        :param patch_size: side length of each square patch
        :param emb_size: embedding dimension of each patch token
        :param img_size: side length of the (square) input image
        """
        super().__init__()
        self.patch_size = patch_size
        self.projection = torch.nn.Sequential(
            # Conv with kernel_size == stride == patch_size projects each patch
            # to an emb_size-dimensional vector (faster than an explicit Linear).
            torch.nn.Conv2d(in_channels, emb_size, kernel_size=patch_size, stride=patch_size),
            # (b, e, h', w') -> (b, e, h'*w'); parameterless, so the Conv2d keeps
            # index 0 in the state_dict.
            torch.nn.Flatten(start_dim=2),
        )
        # Learnable classification token, prepended to the patch sequence.
        self.cls_token = torch.nn.Parameter(torch.randn(1, 1, emb_size))
        # Learned position embedding (disabled: it did not help on this task).
        # self.positions = torch.nn.Parameter(torch.randn(1, (img_size // patch_size) ** 2 + 1, emb_size))

    def forward(self, x: Tensor) -> Tensor:
        """
        :param x: images of shape (batch, in_channels, img_size, img_size)
        :return: token sequence of shape (batch, n_patches + 1, emb_size)
        """
        b = x.shape[0]
        # (b, c, h, w) -> (b, e, n_patches) -> (b, n_patches, e)
        x = self.projection(x).transpose(1, 2)
        # Broadcast the CLS token over the batch (expand: no copy needed,
        # torch.cat below copies anyway).
        cls_tokens = self.cls_token.expand(b, -1, -1)
        x = torch.cat([cls_tokens, x], dim=1)
        # Position embedding intentionally omitted (see __init__).
        return x


# In[1] Classification head
class _MeanPool(torch.nn.Module):
    """Average over the token (patch) dimension: (b, n, e) -> (b, e)."""

    def forward(self, x: Tensor) -> Tensor:
        return x.mean(dim=1)


class ClassificationHead(torch.nn.Sequential):
    """Classification head: mean-pool the tokens, normalize, project to logits.

    Input is (batch, n_tokens, emb_size); output is (batch, n_classes).
    """

    def __init__(self, emb_size: int = 768, n_classes: int = 1000):
        """
        :param emb_size: embedding dimension of the incoming tokens
        :param n_classes: number of output classes
        """
        super().__init__(
            # Parameterless pooling at index 0, so LayerNorm and Linear keep
            # state_dict indices 1 and 2.
            _MeanPool(),
            torch.nn.LayerNorm(emb_size),
            torch.nn.Linear(emb_size, n_classes))


# Compose PatchEmbedding, a Transformer, and ClassificationHead into the final ViT-style architecture
class TransformerFull(torch.nn.Module):
    """ViT-style classifier: patch embedding -> Transformer -> classification head.

    Defaults target 28x28 single-channel images (FashionMNIST) with 7x7 patches,
    giving (28 / 7) ** 2 + 1 = 17 tokens of dimension emb_size.
    """

    def __init__(self, in_channels: int = 1, patch_size: int = 7, emb_size: int = 192, img_size: int = 28,
                 n_classes: int = 10, **kwargs):
        """
        :param in_channels: number of input image channels
        :param patch_size: side length of each square patch
        :param emb_size: token embedding dimension (d_model of the Transformer)
        :param img_size: side length of the (square) input image
        :param n_classes: number of output classes
        """
        super().__init__()
        self.patch_embedding = PatchEmbedding(in_channels, patch_size, emb_size, img_size)
        # Conv1d over the token dimension builds the decoder query sequence
        # (same token count; kernel 3 with padding 1 preserves emb_size).
        self.query = torch.nn.Conv1d(int(img_size / patch_size) ** 2 + 1, int(img_size / patch_size) ** 2 + 1,
                                     kernel_size=(3,), padding=1)
        # batch_first=True: inputs are (batch, tokens, emb). Without it,
        # nn.Transformer treats dim 0 as the sequence, so attention would mix
        # samples across the batch instead of across patches.
        self.transformer = torch.nn.Transformer(d_model=emb_size, nhead=8, num_decoder_layers=2, num_encoder_layers=2,
                                                dim_feedforward=emb_size * 4, batch_first=True)
        self.cls = ClassificationHead(emb_size, n_classes)

    def forward(self, x):
        """
        :param x: images of shape (batch, in_channels, img_size, img_size)
        :return: class logits of shape (batch, n_classes)
        """
        x = self.patch_embedding(x)          # (batch, tokens, emb)
        query_emd = self.query(x)            # decoder queries, same shape
        x = self.transformer(x, query_emd)   # (batch, tokens, emb)
        x = self.cls(x)                      # (batch, n_classes)
        return x


def gpu_is_available():
    """Print torch/CUDA diagnostics: version, current device index,
    CUDA availability, and one line per visible GPU.

    :return: None
    """
    print(torch.__version__)
    # Index of the currently selected CUDA device.
    print(torch.cuda.current_device())
    # Whether CUDA is usable at all.
    print(torch.cuda.is_available())
    # "<index> -> <device name>" for each visible GPU (indices start at 0).
    device_count = torch.cuda.device_count()
    for idx in range(device_count):
        print('{} -> {}'.format(idx, torch.cuda.get_device_name(idx)))


def load_mnist(batch_size: int):
    """
    Load the FashionMNIST dataset (downloading it if needed) for distributed
    training. Requires torch.distributed to be initialized, since the training
    loader uses a DistributedSampler.

    :param batch_size: mini-batch size
    :return: (train_loader, test_loader, train_sampler)
    """
    # Preprocessing: convert PIL images to float tensors.
    pre_transform = transforms.Compose([
        transforms.ToTensor()
    ])
    # Download/load the dataset (plain MNIST variant kept for reference).
    # train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=pre_transform, download=True)
    # test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=pre_transform, download=True)
    train_dataset = torchvision.datasets.FashionMNIST(root='./data', train=True, transform=pre_transform, download=True)
    test_dataset = torchvision.datasets.FashionMNIST(root='./data', train=False, transform=pre_transform, download=True)
    # Build the loaders. The DistributedSampler shards the training set across
    # processes; shuffle must stay False because the sampler does the shuffling.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, shuffle=False,
                              pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, pin_memory=True)
    # train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    # test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

    # Optionally visualize a few samples:
    # images, labels = next(iter(train_loader))
    # img = torchvision.utils.make_grid(images)
    #
    # img = img.numpy().transpose(1, 2, 0)
    # print(labels)
    # plt.imshow(img)
    # plt.show()
    return train_loader, test_loader, train_sampler


def train(model: torch.nn.Module, train_loader: DataLoader, opt, epochs: int, log_batch: int, save_path: str, sampler,
          verbose: int = 2):
    """
    Run the distributed training loop: forward/backward per batch, loss
    reduction across ranks, per-epoch accuracy measured on the training set,
    and a final model save (rank 0 only).

    :param model: a torch.nn.Module (expected to be DDP-wrapped and on GPU)
    :param train_loader: training data loader
    :param opt: optimizer
    :param epochs: number of epochs
    :param log_batch: log the batch loss this many times per epoch
    :param save_path: path where the model state_dict is saved
    :param sampler: DistributedSampler whose epoch is advanced for reshuffling
    :param verbose: 0: no per-batch logs and no per-epoch classification_report;
                    1: per-batch logs but no classification_report; 2: everything
    :return: None
    """

    def reduce_loss(tensor, rank, world_size):
        # Sum the loss tensor onto rank 0 and average it there (in-place).
        with torch.no_grad():
            dist.reduce(tensor, dst=0)
            if rank == 0:
                tensor /= world_size

    start_time = datetime.datetime.now()
    if global_rank == 0:
        print('[{}] Start training'.format(start_time))
    loss_function = functional.cross_entropy  # standard classification loss
    for epoch in range(epochs):
        # DistributedSampler deterministically shuffles data
        # by seting random seed be current number epoch
        # so if do not call set_epoch when start of one epoch
        # the order of shuffled data will be always same
        sampler.set_epoch(epoch)

        epoch_start_time = datetime.datetime.now()
        train_loss = .0
        x_train_num = len(train_loader.dataset)
        batch_num = len(train_loader)
        model.train()
        for index, (x_train, y_train) in enumerate(train_loader):
            x_train, y_train = x_train.cuda(), y_train.cuda()
            # One optimization step per batch.
            opt.zero_grad()  # clear gradients from the previous step
            out = model(x_train)  # forward pass
            loss = loss_function(out, y_train, reduction='sum')  # summed (not averaged) over the batch
            loss.backward()  # backward pass
            opt.step()  # apply the parameter update
            reduce_loss(loss, global_rank, torch.cuda.device_count())

            if index % int((1 / log_batch) * batch_num) == 0 and verbose != 0 and global_rank == 0:  # log loss
                # Use loss.item() for every read except loss.backward(); keeping
                # the tensor alive retains the graph and memory grows unboundedly.
                print('[{}] Training epoch: {} [{}/{} ({:.0f}%)] \tbatch_avg_loss: {:.6f}'.format(
                    datetime.datetime.now(),
                    epoch, index * x_train.size(0), x_train_num,
                           100. * index / len(train_loader), loss.item() / x_train.size(0)))
            train_loss += loss.item()
        train_loss = train_loss / x_train_num  # average loss per sample
        epoch_end_time = datetime.datetime.now()
        # Compute accuracy. NOTE(review): this loop iterates train_loader, so it
        # reports training accuracy, not held-out validation accuracy.
        valid_loss = .0
        total = 0
        acc = 0
        pred_total = []
        truth_total = []
        model.eval()
        with torch.no_grad():
            for x_valid, y_valid in train_loader:
                # One batch at a time.
                x_valid, y_valid = x_valid.cuda(), y_valid.cuda()
                out = model(x_valid)  # forward pass
                valid_loss += loss_function(out, y_valid, reduction='sum').item()  # summed loss (accumulated but never reported)
                pred = torch.argmax(out.data, dim=-1).tolist()
                truth = y_valid.tolist()
                total += len(truth)
                for i in range(len(truth)):
                    if pred[i] == truth[i]:
                        acc += 1
                pred_total += pred
                truth_total += truth
        if global_rank == 0:
            print('[{}] End epoch: {} \tepoch_avg_loss:{:.4f} \ttrain_accuracy:{:.2f} \tcosts {}'.format(
                epoch_end_time,
                epoch,
                train_loss,
                acc / total,
                epoch_end_time - epoch_start_time))
        if verbose == 2 and global_rank == 0:
            print(classification_report(y_pred=pred_total, y_true=truth_total))

    end_time = datetime.datetime.now()
    if global_rank == 0:
        print('[{}] End training, costs {}'.format(end_time, end_time - start_time))
        torch.save(model.state_dict(), save_path)  # persist learned weights


def mlp_test(model: torch.nn.Module, test_loader):
    """
    Evaluate the model on the test set and (on rank 0) print the average
    cross-entropy loss and a classification report.

    :param model: a torch.nn.Module (expected to be on GPU)
    :param test_loader: test data loader
    :return: None
    """
    model.eval()
    test_loss = 0
    loss_function = functional.cross_entropy  # standard classification loss
    pred = []
    truth = []
    with torch.no_grad():  # no gradient tracking during evaluation
        for x_test, y_test in test_loader:
            # One batch at a time.
            x_test, y_test = x_test.cuda(), y_test.cuda()
            out = model(x_test)  # forward pass
            # .item() accumulates a plain Python float instead of a 0-dim tensor.
            test_loss += loss_function(out, y_test, reduction='sum').item()  # summed; averaged below
            pred += torch.argmax(out.data, dim=-1).tolist()
            truth += y_test.tolist()
    test_loss /= len(test_loader.dataset)  # average loss per sample
    if global_rank == 0:
        print('Test_avg_loss: {:.4f}'.format(test_loss))
        print(classification_report(y_pred=pred, y_true=truth))


def main():
    """
    Entry point: build the data loaders, model, and optimizer, train, and
    (on rank 0) evaluate on the test set.

    :return: None
    """
    saved_path = './saved_model/model.pth'
    parent_path = os.path.abspath(os.path.dirname(saved_path))
    if not os.path.exists(parent_path):
        os.makedirs(parent_path)
    batch_size = 32
    lr = 1e-3  # learning rate used for FashionMNIST
    epochs = 10
    log_batch = 5
    train_loader, test_loader, sampler = load_mnist(batch_size=batch_size)
    if torch.cuda.is_available():
        model = TransformerFull().cuda()
        # Convert BatchNorm layers (if any) to SyncBatchNorm BEFORE building the
        # optimizer: the conversion replaces modules and their parameters, so an
        # optimizer created earlier would be updating stale parameter objects.
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank)
        opt = torch.optim.SGD(params=model.parameters(), lr=lr)  # plain SGD over the wrapped model's parameters
    else:
        model = TransformerFull()
        opt = torch.optim.SGD(params=model.parameters(), lr=lr)  # plain SGD
    # Train
    train(model=model, train_loader=train_loader, opt=opt, epochs=epochs, log_batch=log_batch, save_path=saved_path,
          verbose=1, sampler=sampler)

    # Test (rank 0 only). strict=False tolerates any state_dict key mismatch
    # between the saved checkpoint and the wrapped model.
    if global_rank == 0:
        model.load_state_dict(torch.load(saved_path), strict=False)
        mlp_test(model, test_loader)


if __name__ == '__main__':
    main()
    # Launch with, e.g.:
    # python -m torch.distributed.launch --nproc_per_node=4 transformer.py
