# -*- coding:utf-8 -*-#
# @Time:2023/5/31 14:46
# @Author:Adong
# @Software:PyCharm

"""
CNN训练文件，
重要文件，谨慎修改！
重要文件，谨慎修改！
重要文件，谨慎修改！
重要文件，谨慎修改！
重要文件，谨慎修改！
"""


import numpy as np
import torch
from torch import nn
from torch.utils import data
from net import MyLeNet5,MyLeNet5_NCL,ResNet50_NCL,ResNet50
from torch.optim import lr_scheduler
from torchvision import transforms
import os
import dataset
import matplotlib.pyplot as plt
from tqdm import tqdm


class Trainer:
    """Train and evaluate a CNN (ResNet50 by default) on the wavV3_to_RGB dataset.

    Alternative model/loss configurations (MyLeNet5 and the NCL ensembles) are
    preserved as commented-out variants inside train() and test(); switch by
    uncommenting the matching section there and the model in __init__.
    """

    def __init__(self, vision):
        # Version tag used in the saved-model filename and the result plot.
        self.vision = vision
        # Pick the training device: GPU when available, otherwise CPU.
        self.device = "cuda" if torch.cuda.is_available() else 'cpu'
        print("training by " + self.device + "!")
        # Instantiate the model (defined in net.py) and move it to the device.
        self.model = ResNet50(5).to(self.device)

        # Freeze everything except the fully-connected layers: a parameter is
        # frozen when its name contains none of the markers below.
        # (Fix: the original `all(word if word not in name else False ...)` was
        # an obfuscated spelling of this same condition.)
        for name, para in self.model.named_parameters():
            if not any(word in name for word in ["fc", "f1", "f2", "f3", "f4", "f5"]):
                para.requires_grad_(False)
                print(name + "已冻结")
            else:
                print(name + "未冻结")

        # Loss functions: cross-entropy (used in test()) and summed MSE (used
        # in train() against one-hot targets).
        self.CE = nn.CrossEntropyLoss()
        self.MSE = nn.MSELoss(reduction='sum')
        # SGD optimizer; frozen parameters receive no gradient and stay fixed.
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-3, momentum=0.9)
        # Multiply the learning rate by `gamma` every `step_size` epochs.
        # start_training() advances it with .step() once per epoch.
        self.lr_scheduler = lr_scheduler.StepLR(optimizer=self.optimizer, step_size=10, gamma=0.1)
        # Build the datasets (project-local `dataset` module).
        self.train_dataset = dataset.makeDataset('train', 'wavV3_to_RGB')
        self.test_dataset = dataset.makeDataset('test', 'wavV3_to_RGB')
        # Preprocessing pipeline. NOTE(review): currently not passed to the
        # datasets/dataloaders below — confirm whether it should be.
        self.data_transform = transforms.Compose([transforms.ToTensor()])
        # DataLoaders.
        self.batch_size = 32
        self.train_dataloader = data.DataLoader(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=True)
        self.test_dataloader = data.DataLoader(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=True)
        # Epoch counter read by train()/test() for the progress-bar label; set
        # here so those methods also work when called outside start_training().
        self.epoch = 0

    @classmethod
    def one_hot(cls, x, class_count):
        """Return the one-hot encoding of label tensor `x` over `class_count` classes.

        Builds a [class_count, class_count] identity matrix and selects the
        rows indexed by `x`, giving a (len(x), class_count) float tensor.
        """
        return torch.eye(class_count)[x, :]

    @classmethod
    def significance(cls, O):
        """Compute per-sample significance scores for each base learner.

        O is a list of output tensors (one per base learner), each of shape
        (batch, classes). A learner's significance on a sample is |max - mean|
        of its output row, normalized per sample by the maximum across
        learners. Returns a numpy array of shape (batch, len(O)).
        """
        S = []
        for o in O:      # iterate over each base learner's batch of outputs
            s = []       # learner o's significance on each sample of the batch
            for i in o:  # learner o's prediction for one sample
                i = i.cpu().detach().numpy()
                mean = np.mean(i)
                peak = np.max(i)  # renamed from `max` to avoid shadowing the builtin
                s.append(np.abs(peak - mean))
            S.append(s)
        # Normalize each sample's scores by the largest score across learners.
        S = S / np.max(S, axis=0)
        S = np.array(S).T
        return S

    def train(self):
        """Run one training epoch; return (average accuracy, average loss)."""
        model = self.model
        device = self.device
        dataloader = self.train_dataloader
        optimizer = self.optimizer

        model.train()  # training mode: parameters update during this phase

        cumulative_loss = 0   # running sum of per-batch losses
        cumulative_acc = 0.0  # running sum of per-batch accuracies
        n = 0                 # number of batches processed so far
        t = tqdm(dataloader, desc=f'[train]epoch:{self.epoch}')
        for X, y in t:  # X: batch of images, y: ground-truth labels
            X = X.to(device)
            y = y.to(device)

            '''MyLeNet5'''
            # output = model(X)  # 输出一个矩阵，表示当前batch的X属于各类的概率
            # cur_loss = self.CE(output, y)  # 计算误差
            # max_probability, pred_label = torch.max(output, dim=1)
            # # dim表示取行最大(0)还是列最大(1)。函数会返回两个tensor，第一个tensor是每行的最大值；第二个tensor是每行最大值的索引。
            # cur_acc = torch.sum(y.eq(pred_label)) / output.shape[0]  # 计算当前批次（16张图片）的预测准确率
            '''MyLeNet5_NCL'''
            # o1,o2,o3,o4,o5 = model(X)
            # output = (o1+o2+o3+o4+o5)/5         # 集成输出值
            # lamb = 0.4                  # lambda的值
            # p = -(self.MSE(o1,output)+self.MSE(o2,output)+self.MSE(o3,output)+self.MSE(o4,output)+self.MSE(o5,output))
            # y_p = Trainer.one_hot(y.cpu(),len(o1[0])).to(device)
            # e = self.MSE(o1,y_p)+self.MSE(o2,y_p)+self.MSE(o3,y_p)+self.MSE(o4,y_p)+self.MSE(o5,y_p)
            # cur_loss = e + lamb * p  # 计算误差
            # max_probability, pred_label = torch.max(output, dim=1)
            # cur_acc = torch.sum(y.eq(pred_label)) / output.shape[0]
            '''ResNet50'''
            output = model(X)  # per-class scores for the current batch
            # One-hot targets sized from the model's output width.
            y_p = Trainer.one_hot(y.cpu(), len(output[0])).to(device)
            cur_loss = self.MSE(output, y_p)  # summed MSE against one-hot targets
            # torch.max over dim=1 returns (row maxima, row argmax indices).
            max_probability, pred_label = torch.max(output, dim=1)
            cur_acc = torch.sum(y.eq(pred_label)) / output.shape[0]  # batch accuracy
            '''ResNet50_NCL'''
            # o1,o2,o3,o4,o5 = model(X)
            # output = (o1+o2+o3+o4+o5)/5         # 集成输出值
            # lamb = 0.4                  # lambda的值
            # p = -(self.MSE(o1,output)+self.MSE(o2,output)+self.MSE(o3,output)+self.MSE(o4,output)+self.MSE(o5,output))
            # y_p = Trainer.one_hot(y.cpu(),len(o1[0])).to(device)
            # e = self.MSE(o1,y_p)+self.MSE(o2,y_p)+self.MSE(o3,y_p)+self.MSE(o4,y_p)+self.MSE(o5,y_p)
            # cur_loss = e + lamb * p  # 计算误差
            # max_probability, pred_label = torch.max(output, dim=1)
            # cur_acc = torch.sum(y.eq(pred_label)) / output.shape[0]
            '''proposed ResNet50_NCL'''
            # o1,o2,o3,o4,o5 = model(X)
            # S = Trainer.significance([o1,o2,o3,o4,o5])          # 计算基学习器显著性
            # for idx,o in enumerate([o1,o2,o3,o4,o5]):
            #     s = S[:,idx]
            #     for idxx,ss in enumerate(s):
            #         o[idxx] = o[idxx] * ss
            # output = (o1+o2+o3+o4+o5)/5         # 集成输出值
            # lamb = 0.4                  # lambda的值
            # p = -(self.MSE(o1,output)+self.MSE(o2,output)+self.MSE(o3,output)+self.MSE(o4,output)+self.MSE(o5,output))
            # y_p = Trainer.one_hot(y.cpu(),len(o1[0])).to(device)
            # e = self.MSE(o1,y_p)+self.MSE(o2,y_p)+self.MSE(o3,y_p)+self.MSE(o4,y_p)+self.MSE(o5,y_p)
            # cur_loss = e + lamb * p  # 计算误差
            # max_probability, pred_label = torch.max(output, dim=1)
            # cur_acc = torch.sum(y.eq(pred_label)) / output.shape[0]

            optimizer.zero_grad()   # reset gradients
            cur_loss.backward()     # backprop: compute parameter gradients
            optimizer.step()        # one gradient-descent update
            cumulative_loss += cur_loss.item()  # .item() extracts the scalar loss
            cumulative_acc += cur_acc.item()
            n = n + 1  # count processed batches
            t.set_postfix({'train average loss': cumulative_loss / n,'train_Average_acc': cumulative_acc / n})
        print("train_Average_loss:" + str(cumulative_loss / n))  # epoch-average loss
        print("train_Average_acc:" + str(cumulative_acc / n))    # epoch-average accuracy
        return cumulative_acc / n, cumulative_loss / n

    def test(self):
        """Run one validation pass; return (average accuracy, average loss)."""
        model = self.model
        device = self.device
        dataloader = self.test_dataloader

        model.eval()  # evaluation mode: parameters are not updated
        cumulative_loss = 0   # running sum of per-batch losses
        cumulative_acc = 0.0  # running sum of per-batch accuracies
        n = 0                 # number of batches processed so far
        with torch.no_grad():  # disable autograd: pure evaluation, no parameter change
            t = tqdm(dataloader, desc=f'[test]epoch:{self.epoch}')
            for X, y in t:
                # Forward pass only.
                X, y = X.to(device), y.to(device)
                '''MyLeNet5'''
                # output = model(X)
                # cur_loss = self.CE(output, y)
                '''MyLeNet5_NCL'''
                # o1, o2, o3, o4, o5 = model(X)
                # output = (o1 + o2 + o3 + o4 + o5) / 5  # 集成输出值
                # cur_loss = self.CE(output, y)
                '''ResNet50'''
                output = model(X)
                cur_loss = self.CE(output, y)  # cross-entropy for validation
                '''ResNet50_NCL'''
                # o1, o2, o3, o4, o5 = model(X)
                # output = (o1 + o2 + o3 + o4 + o5) / 5  # 集成输出值
                # cur_loss = self.CE(output, y)
                '''proposed ResNet50_NCL'''
                # o1, o2, o3, o4, o5 = model(X)
                # S = Trainer.significance([o1, o2, o3, o4, o5])  # 计算基学习器显著性
                # for idx, o in enumerate([o1, o2, o3, o4, o5]):
                #     s = S[:, idx]
                #     for idxx, ss in enumerate(s):
                #         o[idxx] = o[idxx] * ss
                # output = (o1 + o2 + o3 + o4 + o5) / 5  # 集成输出值
                # cur_loss = self.CE(output, y)

                # Row-wise maximum and its index (the predicted class).
                _, pred = torch.max(output, dim=1)
                cur_acc = torch.sum(y.eq(pred)) / output.shape[0]
                cumulative_loss += cur_loss.item()
                cumulative_acc += cur_acc.item()
                n = n + 1
                t.set_postfix({'test average loss': cumulative_loss / n,'test_Average_acc': cumulative_acc / n})
            print("val_Average_loss:" + str(cumulative_loss / n))
            print("val_Average_acc:" + str(cumulative_acc / n))

            return cumulative_acc / n, cumulative_loss / n

    def start_training(self, epoch):
        """Train for `epoch` epochs, log metrics, keep the best checkpoint, plot curves."""
        train_Average_loss_trend = []
        val_Average_loss_trend = []
        train_Average_acc_trend = []
        val_Average_acc_trend = []

        model = self.model

        # Best validation accuracy seen so far (renamed from the misleading
        # `min_acc` — it tracks a maximum, not a minimum).
        best_acc = 0
        for t in range(epoch):
            self.epoch = t
            train_Average_acc, train_Average_loss = self.train()
            val_Average_acc, val_Average_loss = self.test()
            # Fix: advance the LR schedule once per epoch. The StepLR created
            # in __init__ was never stepped, so its decay never took effect.
            self.lr_scheduler.step()

            train_Average_loss_trend.append(train_Average_loss)
            train_Average_acc_trend.append(train_Average_acc)
            val_Average_loss_trend.append(val_Average_loss)
            val_Average_acc_trend.append(val_Average_acc)

            # Append this epoch's metrics to the run log.
            with open('log.txt', 'a+') as f:
                f.write('epoch{} - train_Average_loss{},val_Average_loss{},train_Average_acc{},val_Average_acc{}\n'.format(t+1, train_Average_loss,val_Average_loss,train_Average_acc,val_Average_acc))
            # Save the weights whenever validation accuracy improves.
            if val_Average_acc > best_acc:
                # Idiomatic replacement for the exists()+mkdir pair; no race.
                os.makedirs('save_model', exist_ok=True)
                best_acc = val_Average_acc
                print("Current best model has been saved!")
                # state_dict() saves weights only; torch.save(model, ...) would
                # save the whole model object.
                torch.save(model.state_dict(),
                           'save_model/LeNet_' + self.vision + '.pth')
        print('training finished!')
        self.draw(train_Average_loss_trend, val_Average_loss_trend, train_Average_acc_trend, val_Average_acc_trend)

    def draw(self, train_Average_loss, val_Average_loss, train_Average_acc, val_Average_acc):
        """Plot the four loss/accuracy trends in a 2x2 grid and save the figure."""
        fig = plt.figure()
        plt.suptitle('LeNet_' + self.vision)
        ax1 = plt.subplot(221)
        ax1.set_title('train_Average_loss')
        ax2 = plt.subplot(222)
        ax2.set_title('val_Average_loss')
        ax3 = plt.subplot(223)
        ax3.set_title('train_Average_acc')
        ax4 = plt.subplot(224)
        ax4.set_title('val_Average_acc')
        ax1.plot(train_Average_loss)
        ax2.plot(val_Average_loss)
        ax3.plot(train_Average_acc)
        ax4.plot(val_Average_acc)
        fig.tight_layout()
        # Fix: ensure the output directory exists so savefig cannot fail.
        os.makedirs('./result_img', exist_ok=True)
        plt.savefig('./result_img/result_img_LeNet_' + self.vision + '.png')


if __name__ == '__main__':
    # Build a trainer for this model version and run a 10-epoch session.
    trainer = Trainer(vision='VResNet50')
    trainer.start_training(10)
